From 05e0bcef1c2165c556b910314312866dc4a667b7 Mon Sep 17 00:00:00 2001 From: Charles McGarvey Date: Thu, 21 Apr 2022 19:50:20 -0600 Subject: [PATCH] Add recursive transactions --- lib/File/KDBX.pm | 223 +++++++++++++++++----------- lib/File/KDBX/Cipher/Stream.pm | 60 ++++++-- lib/File/KDBX/Constants.pm | 19 +++ lib/File/KDBX/Dumper.pm | 55 ++++++- lib/File/KDBX/Dumper/XML.pm | 89 ++++++++--- lib/File/KDBX/Entry.pm | 104 ++++++++++--- lib/File/KDBX/Group.pm | 41 +++-- lib/File/KDBX/Loader.pm | 29 +++- lib/File/KDBX/Loader/XML.pm | 2 - lib/File/KDBX/Object.pm | 263 ++++++++++++++++++++++++++++++++- lib/File/KDBX/Transaction.pm | 59 ++++++-- lib/File/KDBX/Util.pm | 28 +++- t/entry.t | 39 ++++- t/kdb.t | 2 +- t/object.t | 89 ++++++++++- 15 files changed, 917 insertions(+), 185 deletions(-) diff --git a/lib/File/KDBX.pm b/lib/File/KDBX.pm index e5fcb27..2326b9b 100644 --- a/lib/File/KDBX.pm +++ b/lib/File/KDBX.pm @@ -9,7 +9,7 @@ use Devel::GlobalDestruction; use File::KDBX::Constants qw(:all); use File::KDBX::Error; use File::KDBX::Safe; -use File::KDBX::Util qw(:empty erase generate_uuid search simple_expression_query snakify); +use File::KDBX::Util qw(:empty :uuid :search erase simple_expression_query snakify); use Hash::Util::FieldHash qw(fieldhashes); use List::Util qw(any); use Ref::Util qw(is_ref is_arrayref is_plain_hashref); @@ -223,31 +223,6 @@ sub user_agent_string { __PACKAGE__, $VERSION, @Config::Config{qw(package version osname osvers archname)}); } -=attr sig1 - -=attr sig2 - -=attr version - -=attr headers - -=attr inner_headers - -=attr meta - -=attr binaries - -=attr deleted_objects - -=attr raw - - $value = $kdbx->$attr; - $kdbx->$attr($value); - -Get and set attributes. 
- -=cut - my %ATTRS = ( sig1 => KDBX_SIG1, sig2 => KDBX_SIG2_2, @@ -283,28 +258,28 @@ my %ATTRS_META = ( generator => '', header_hash => '', database_name => '', - database_name_changed => sub { gmtime }, + database_name_changed => sub { scalar gmtime }, database_description => '', - database_description_changed => sub { gmtime }, + database_description_changed => sub { scalar gmtime }, default_username => '', - default_username_changed => sub { gmtime }, + default_username_changed => sub { scalar gmtime }, maintenance_history_days => 0, color => '', - master_key_changed => sub { gmtime }, + master_key_changed => sub { scalar gmtime }, master_key_change_rec => -1, master_key_change_force => -1, # memory_protection => sub { +{} }, custom_icons => sub { +{} }, recycle_bin_enabled => true, recycle_bin_uuid => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", - recycle_bin_changed => sub { gmtime }, + recycle_bin_changed => sub { scalar gmtime }, entry_templates_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", - entry_templates_group_changed => sub { gmtime }, + entry_templates_group_changed => sub { scalar gmtime }, last_selected_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", last_top_visible_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", history_max_items => HISTORY_DEFAULT_MAX_ITEMS, history_max_size => HISTORY_DEFAULT_MAX_SIZE, - settings_changed => sub { gmtime }, + settings_changed => sub { scalar gmtime }, # binaries => sub { +{} }, # custom_data => sub { +{} }, ); @@ -314,39 +289,9 @@ my %ATTRS_MEMORY_PROTECTION = ( protect_password => true, protect_url => false, protect_notes => false, - auto_enable_visual_hiding => false, + # auto_enable_visual_hiding => false, ); -sub _update_group_uuid { - my $self = shift; - my $old_uuid = shift // return; - my $new_uuid = shift; - - my $meta = $self->meta; - $self->recycle_bin_uuid($new_uuid) if $old_uuid eq ($meta->{recycle_bin_uuid} // ''); - $self->entry_templates_group($new_uuid) if $old_uuid eq ($meta->{entry_templates_group} // ''); - 
$self->last_selected_group($new_uuid) if $old_uuid eq ($meta->{last_selected_group} // ''); - $self->last_top_visible_group($new_uuid) if $old_uuid eq ($meta->{last_top_visible_group} // ''); - - for my $group (@{$self->all_groups}) { - $group->last_top_visible_entry($new_uuid) if $old_uuid eq ($group->{last_top_visible_entry} // ''); - $group->previous_parent_group($new_uuid) if $old_uuid eq ($group->{previous_parent_group} // ''); - } - for my $entry (@{$self->all_entries}) { - $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // ''); - } -} - -sub _update_entry_uuid { - my $self = shift; - my $old_uuid = shift // return; - my $new_uuid = shift; - - for my $entry (@{$self->all_entries}) { - $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // ''); - } -} - while (my ($attr, $default) = each %ATTRS) { no strict 'refs'; ## no critic (ProhibitNoStrict) *{$attr} = sub { @@ -519,7 +464,7 @@ Every database has only a single root group at a time. Some old KDB files might When reading such files, a single implicit root group is created to contain the other explicit groups. When writing to such a format, if the root group looks like it was implicitly created then it won't be written and the resulting file might have multiple root groups. This allows working with older files without changing -their written internal structure while still adhering to the modern restrictions while the database is opened. +their written internal structure while still adhering to modern semantics while the database is opened. B The root group of a KDBX database contains all of the database's entries and other groups. If you replace the root group, you are essentially replacing the entire database contents with something else. @@ -770,7 +715,13 @@ sub find_entries { search => $args{search}, history => $args{history}, ); - return @{search($self->all_entries(%all_entries), is_arrayref($query) ? 
@$query : $query)}; + my $limit = delete $args{limit}; + if (defined $limit) { + return @{search_limited($self->all_entries(%all_entries), is_arrayref($query) ? @$query : $query, $limit)}; + } + else { + return @{search($self->all_entries(%all_entries), is_arrayref($query) ? @$query : $query)}; + } } sub find_entries_simple { @@ -1055,15 +1006,16 @@ sub resolve_reference { P => 'expanded_password', A => 'expanded_url', N => 'expanded_notes', - I => 'id', + I => 'uuid', O => 'other_strings', ); $wanted = $fields{$wanted} or return; $search_in = $fields{$search_in} or return; - my $query = simple_expression_query($text, ($search_in eq 'id' ? 'eq' : '=~'), $search_in); + my $query = $search_in eq 'uuid' ? query($search_in => uuid($text)) + : simple_expression_query($text, '=~', $search_in); - my ($entry) = $self->find_entries($query); + my ($entry) = $self->find_entries($query, limit => 1); $entry or return; return $entry->$wanted; @@ -1190,13 +1142,6 @@ itself to allow method chaining. =cut -sub peek { - my $self = shift; - my $string = shift; - my $safe = $self->_safe or return; - return $safe->peek($string); -} - sub unlock { my $self = shift; my $safe = $self->_safe or return $self; @@ -1207,14 +1152,43 @@ sub unlock { return $self; } -# sub unlock_scoped { -# my $self = shift; -# return if !$self->is_locked; -# require Scope::Guard; -# my $guard = Scope::Guard->new(sub { $self->lock }); -# $self->unlock; -# return $guard; -# } +=method unlock_scoped + + $guard = $kdbx->unlock_scoped; + +Unlock a database temporarily, relocking when the guard is released (typically at the end of a scope). Returns +C if the database is already unlocked. + +See L and L. 
+ +=cut + +sub unlock_scoped { + throw 'Programmer error: Cannot call unlock_scoped in void context' if !defined wantarray; + my $self = shift; + return if !$self->is_locked; + require Scope::Guard; + my $guard = Scope::Guard->new(sub { $self->lock }); + $self->unlock; + return $guard; +} + +=method peek + + $string = $kdbx->peek(\%string); + $string = $kdbx->peek(\%binary); + +Peek at the value of a protected string or binary without unlocking the whole database. The argument can be +a string or binary hashref as returned by L or L. + +=cut + +sub peek { + my $self = shift; + my $string = shift; + my $safe = $self->_safe or return; + return $safe->peek($string); +} =method is_locked @@ -1464,6 +1438,64 @@ sub check { ######################################################################################### +sub _handle_signal { + my $self = shift; + my $object = shift; + my $type = shift; + + my %handlers = ( + 'entry.uuid.changed' => \&_update_entry_uuid, + 'group.uuid.changed' => \&_update_group_uuid, + ); + my $handler = $handlers{$type} or return; + $self->$handler($object, @_); +} + +sub _update_group_uuid { + my $self = shift; + my $object = shift; + my $new_uuid = shift; + my $old_uuid = shift // return; + + my $meta = $self->meta; + $self->recycle_bin_uuid($new_uuid) if $old_uuid eq ($meta->{recycle_bin_uuid} // ''); + $self->entry_templates_group($new_uuid) if $old_uuid eq ($meta->{entry_templates_group} // ''); + $self->last_selected_group($new_uuid) if $old_uuid eq ($meta->{last_selected_group} // ''); + $self->last_top_visible_group($new_uuid) if $old_uuid eq ($meta->{last_top_visible_group} // ''); + + for my $group (@{$self->all_groups}) { + $group->last_top_visible_entry($new_uuid) if $old_uuid eq ($group->{last_top_visible_entry} // ''); + $group->previous_parent_group($new_uuid) if $old_uuid eq ($group->{previous_parent_group} // ''); + } + for my $entry (@{$self->all_entries}) { + $entry->previous_parent_group($new_uuid) if $old_uuid eq 
($entry->{previous_parent_group} // ''); + } +} + +sub _update_entry_uuid { + my $self = shift; + my $object = shift; + my $new_uuid = shift; + my $old_uuid = shift // return; + + my $old_pretty = format_uuid($old_uuid); + my $new_pretty = format_uuid($new_uuid); + my $fieldref_match = qr/\{REF:([TUPANI])\@I:\Q$old_pretty\E\}/is; + + for my $entry (@{$self->all_entries}) { + $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // ''); + + for my $string (values %{$entry->strings}) { + next if !defined $string->{value} || $string->{value} !~ $fieldref_match; + my $txn = $entry->begin_work; + $string->{value} =~ s/$fieldref_match/{REF:$1\@I:$new_pretty}/g; + $txn->commit; + } + } +} + +######################################################################################### + =attr comment A text string associated with the database. Often unset. @@ -2145,4 +2177,27 @@ when trying to use such features with undersized IVs. L is a much older alternative. It's good but has a backlog of bugs and lacks support for newer KDBX features. +=attr sig1 + +=attr sig2 + +=attr version + +=attr headers + +=attr inner_headers + +=attr meta + +=attr binaries + +=attr deleted_objects + +=attr raw + + $value = $kdbx->$attr; + $kdbx->$attr($value); + +Get and set attributes. + =cut diff --git a/lib/File/KDBX/Cipher/Stream.pm b/lib/File/KDBX/Cipher/Stream.pm index 1b9aeca..e904c0f 100644 --- a/lib/File/KDBX/Cipher/Stream.pm +++ b/lib/File/KDBX/Cipher/Stream.pm @@ -7,6 +7,7 @@ use strict; use Crypt::Digest qw(digest_data); use File::KDBX::Constants qw(:cipher :random_stream); use File::KDBX::Error; +use Scalar::Util qw(blessed); use Module::Load; use namespace::clean; @@ -42,27 +43,49 @@ sub init { return $self; } +=method crypt + + $ciphertext = $cipher->crypt($plaintext); + $plaintext = $cipher->crypt($ciphertext); + +Encrypt or decrypt some data. These ciphers are symmetric, so encryption and decryption are the same +operation. 
This method is an alias for both L and L. + +=cut + sub crypt { my $self = shift; my $stream = $self->_stream; return join('', map { $stream->crypt(ref $_ ? $$_ : $_) } grep { defined } @_); } +=method keystream + + $stream = $cipher->keystream; + +Access the keystream. + +=cut + sub keystream { my $self = shift; return $self->_stream->keystream(@_); } +=method dup + + $cipher_copy = $cipher->dup(%attributes); + +Get a copy of an existing cipher with the counter reset, optionally applying new attributes. + +=cut + sub dup { - my $self = shift; - my $dup = File::KDBX::Cipher->new( - stream_id => $self->stream_id, - key => $self->key, - @_, - ); - $dup->{key} = $self->key; - $dup->{iv} = $self->iv; - # FIXME - probably turn this into a proper clone method + my $self = shift; + my $class = blessed($self); + + my $dup = bless {%$self, @_}, $class; + delete $dup->{stream}; return $dup; } @@ -101,16 +124,29 @@ sub decrypt { goto &crypt } sub finish { delete $_[0]->{stream}; '' } -sub counter { $_[0]->{counter} // 0 } -sub offset { $_[0]->{offset} } - =attr algorithm + $algorithm = $cipher->algorithm; + Get the stream cipher algorithm. Can be one of C and C. +=attr counter + + $counter = $cipher->counter; + +Get the initial counter / block count into the keystream. + +=attr offset + + $offset = $cipher->offset; + +Get the initial byte offset into the keystream. This has precedence over L if both are set. 
+ =cut sub algorithm { $_[0]->{algorithm} or throw 'Stream cipher algorithm is not set' } +sub counter { $_[0]->{counter} // 0 } +sub offset { $_[0]->{offset} } sub key_size { { Salsa20 => 32, ChaCha => 32 }->{$_[0]->{algorithm} || ''} // 0 } sub iv_size { { Salsa20 => 8, ChaCha => 12 }->{$_[0]->{algorithm} || ''} // -1 } sub block_size { 1 } diff --git a/lib/File/KDBX/Constants.pm b/lib/File/KDBX/Constants.pm index 51e7e4c..ea4e026 100644 --- a/lib/File/KDBX/Constants.pm +++ b/lib/File/KDBX/Constants.pm @@ -276,6 +276,7 @@ BEGIN { our %EXPORT_TAGS; push @{$EXPORT_TAGS{header}}, 'KDBX_HEADER'; push @{$EXPORT_TAGS{inner_header}}, 'KDBX_INNER_HEADER'; +push @{$EXPORT_TAGS{icon}}, 'icon'; $EXPORT_TAGS{all} = [map { @$_ } values %EXPORT_TAGS]; our @EXPORT_OK = sort @{$EXPORT_TAGS{all}}; @@ -301,6 +302,24 @@ for my $inner_header ( } sub KDBX_INNER_HEADER { $INNER_HEADER{$_[0]} } +my %ICON; +for my $icon ( + ICON_PASSWORD, ICON_PACKAGE_NETWORK, ICON_MESSAGEBOX_WARNING, ICON_SERVER, ICON_KLIPPER, + ICON_EDU_LANGUAGES, ICON_KCMDF, ICON_KATE, ICON_SOCKET, ICON_IDENTITY, ICON_KONTACT, ICON_CAMERA, + ICON_IRKICKFLASH, ICON_KGPG_KEY3, ICON_LAPTOP_POWER, ICON_SCANNER, ICON_MOZILLA_FIREBIRD, + ICON_CDROM_UNMOUNT, ICON_DISPLAY, ICON_MAIL_GENERIC, ICON_MISC, ICON_KORGANIZER, ICON_ASCII, ICON_ICONS, + ICON_CONNECT_ESTABLISHED, ICON_FOLDER_MAIL, ICON_FILESAVE, ICON_NFS_UNMOUNT, ICON_MESSAGE, ICON_KGPG_TERM, + ICON_KONSOLE, ICON_FILEPRINT, ICON_FSVIEW, ICON_RUN, ICON_CONFIGURE, ICON_KRFB, ICON_ARK, + ICON_KPERCENTAGE, ICON_SAMBA_UNMOUNT, ICON_HISTORY, ICON_MAIL_FIND, ICON_VECTORGFX, ICON_KCMMEMORY, + ICON_TRASHCAN_FULL, ICON_KNOTES, ICON_CANCEL, ICON_HELP, ICON_KPACKAGE, ICON_FOLDER, + ICON_FOLDER_BLUE_OPEN, ICON_FOLDER_TAR, ICON_DECRYPTED, ICON_ENCRYPTED, ICON_APPLY, ICON_SIGNATURE, + ICON_THUMBNAIL, ICON_KADDRESSBOOK, ICON_VIEW_TEXT, ICON_KGPG, ICON_PACKAGE_DEVELOPMENT, ICON_KFM_HOME, + ICON_SERVICES, ICON_TUX, ICON_FEATHER, ICON_APPLE, ICON_W, ICON_MONEY, ICON_CERTIFICATE, 
ICON_SMARTPHONE, +) { + $ICON{$icon} = $ICON{0+$icon} = $icon; +} +sub icon { $ICON{$_[0] // ''} // ICON_PASSWORD } + 1; __END__ diff --git a/lib/File/KDBX/Dumper.pm b/lib/File/KDBX/Dumper.pm index 6d02063..8be64be 100644 --- a/lib/File/KDBX/Dumper.pm +++ b/lib/File/KDBX/Dumper.pm @@ -237,9 +237,38 @@ sub kdbx { =attr format +Get the file format used for writing the database. Normally the format is auto-detected from the database, +which is the safest choice. Possible formats: + +=for :list +* C +* C +* C +* C (only used if explicitly set) +* C (only used if explicitly set) + +B There is a potential for data loss if you explicitly use a format that doesn't support the +features used by the KDBX database being written. + +The most common reason to explicitly specify the file format is to save a database as an unencrypted XML file: + + $kdbx->dump_file('database.xml', format => 'XML'); + =cut sub format { $_[0]->{format} } + +=attr inner_format + +Get the format of the data inside the KDBX envelope. This only applies to C and C formats. Possible +formats: + +=for :list +* C - Write the database groups and entries as XML (default) +* C - Write L instead of the actual database contents + +=cut + sub inner_format { $_[0]->{inner_format} // 'XML' } =attr min_version @@ -255,7 +284,29 @@ To generate older KDBX files unsupported by this module, try L. sub min_version { KDBX_VERSION_OLDEST } -sub upgrade { $_[0]->{upgrade} // 1 } +=attr allow_upgrade + + $bool = $dumper->allow_upgrade; + +Whether or not to allow implicitly upgrading a database to a newer version. When enabled, in order to avoid +potential data loss, the database can be upgraded as-needed in cases where the database file format version is +too low to support new features being used. + +The default is to allow upgrading. + +=cut + +sub allow_upgrade { $_[0]->{allow_upgrade} // 1 } + +=attr randomize_seeds + + $bool = $dumper->randomize_seeds; + +Whether or not to randomize seeds in a database before writing. 
The default is to randomize seeds, and there's +not often a good reason not to do so. If disabled, the seeds associated with the KDBX database will be used as +they are. + +=cut sub randomize_seeds { $_[0]->{randomize_seeds} // 1 } @@ -269,7 +320,7 @@ sub _dump { my $kdbx = $self->kdbx; my $min_version = $kdbx->minimum_version; - if ($kdbx->version < $min_version && $self->upgrade) { + if ($kdbx->version < $min_version && $self->allow_upgrade) { alert sprintf("Implicitly upgrading database from %x to %x\n", $kdbx->version, $min_version), version => $kdbx->version, min_version => $min_version; $kdbx->version($min_version); diff --git a/lib/File/KDBX/Dumper/XML.pm b/lib/File/KDBX/Dumper/XML.pm index 23378b6..a079aed 100644 --- a/lib/File/KDBX/Dumper/XML.pm +++ b/lib/File/KDBX/Dumper/XML.pm @@ -12,7 +12,6 @@ use File::KDBX::Error; use File::KDBX::Util qw(assert_64bit erase_scoped gzip snakify); use IO::Handle; use Scalar::Util qw(isdual looks_like_number); -use Scope::Guard; use Time::Piece; use XML::LibXML; use boolean; @@ -22,30 +21,84 @@ use parent 'File::KDBX::Dumper'; our $VERSION = '999.999'; # VERSION -sub protect { +=attr allow_protection + + $bool = $dumper->allow_protection; + +Get whether or not protected strings and binaries should be written in an encrypted stream. Default: C + +=cut + +sub allow_protection { my $self = shift; - $self->{protect} = shift if @_; - $self->{protect} //= 1; + $self->{allow_protection} = shift if @_; + $self->{allow_protection} //= 1; } +=attr binaries + + $bool = $dumper->binaries; + +Get whether or not binaries within the database should be written. Default: C + +=cut + sub binaries { my $self = shift; $self->{binaries} = shift if @_; $self->{binaries} //= $self->kdbx->version < KDBX_VERSION_4_0; } +=attr compress_binaries + + $tristate = $dumper->compress_binaries; + +Get whether or not to compress binaries. 
Possible values: + +=for :list +* C - Always compress binaries +* C - Never compress binaries +* C - Compress binaries if it results in smaller database sizes (default) + +=cut + sub compress_binaries { my $self = shift; $self->{compress_binaries} = shift if @_; $self->{compress_binaries}; } +=attr compress_datetimes + + $bool = $dumper->compress_datetimes; + +Get whether or not to write compressed datetimes. Datetimes are traditionally written in the human-readable +string format of C<1970-01-01T00:00:00Z>, but they can also be written in a compressed form to save some +bytes. The default is to write compressed datetimes if the KDBX file version is 4+, otherwise use the +human-readable format. + +=cut + sub compress_datetimes { my $self = shift; $self->{compress_datetimes} = shift if @_; $self->{compress_datetimes}; } +=attr header_hash + + $octets = $dumper->header_hash; + +Get the value to be written as the B in the B section. This is the way KDBX3 files validate +the authenticity of header data. This is unnecessary and should not be used with KDBX4 files because that +format uses HMAC-SHA256 to detect tampering. + +L automatically calculates the header hash an provides it to this module, and plain +XML files which don't have a KDBX wrapper don't have headers and so should have a header hash. Therefore there +is probably never any reason to set this manually. 
+ +=cut + sub header_hash { $_[0]->{header_hash} } sub _binaries_written { $_[0]->{_binaries_written} //= {} } @@ -195,15 +248,15 @@ sub _write_xml_compressed_content { $value = \$encoded; } - my $always_compress = $self->compress_binaries; - my $try_compress = $always_compress || !defined $always_compress; + my $should_compress = $self->compress_binaries; + my $try_compress = $should_compress || !defined $should_compress; my $compressed; if ($try_compress) { $compressed = gzip($$value); push @cleanup, erase_scoped $compressed; - if ($always_compress || length($compressed) < length($$value)) { + if ($should_compress || length($compressed) < length($$value)) { $value = \$compressed; $node->setAttribute('Compressed', _encode_bool(true)); } @@ -266,13 +319,11 @@ sub _write_xml_root { my $node = shift; my $kdbx = $self->kdbx; - my $is_locked = $kdbx->is_locked; - my $guard = Scope::Guard->new(sub { $kdbx->lock if $is_locked }); - $kdbx->unlock; + my $guard = $kdbx->unlock_scoped; - if (my $group = $kdbx->{root}) { + if (my $group = $kdbx->root) { my $group_node = $node->addNewChild(undef, 'Group'); - $self->_write_xml_group($group_node, $group); + $self->_write_xml_group($group_node, $group->_confirmed); } undef $guard; # re-lock if needed, as early as possible @@ -311,14 +362,14 @@ sub _write_xml_group { ) : (), ); - for my $entry (@{$group->{entries} || []}) { + for my $entry (@{$group->entries}) { my $entry_node = $node->addNewChild(undef, 'Entry'); - $self->_write_xml_entry($entry_node, $entry); + $self->_write_xml_entry($entry_node, $entry->_confirmed); } - for my $group (@{$group->{groups} || []}) { + for my $group (@{$group->groups}) { my $group_node = $node->addNewChild(undef, 'Group'); - $self->_write_xml_group($group_node, $group); + $self->_write_xml_group($group_node, $group->_confirmed); } } @@ -395,11 +446,11 @@ sub _write_xml_entry { ); if (!$in_history) { - if (my @history = @{$entry->{history} || []}) { + if (my @history = @{$entry->history}) { my 
$history_node = $node->addNewChild(undef, 'History'); for my $historical (@history) { my $historical_node = $history_node->addNewChild(undef, 'Entry'); - $self->_write_xml_entry($historical_node, $historical, 1); + $self->_write_xml_entry($historical_node, $historical->_confirmed, 1); } } } @@ -461,7 +512,7 @@ sub _write_xml_entry_string { my $protect = $string->{protect} || $memory_protection->{$memprot_key}; if ($protect) { - if ($self->protect) { + if ($self->allow_protection) { my $encoded; if (utf8::is_utf8($value)) { $encoded = encode('UTF-8', $value); diff --git a/lib/File/KDBX/Entry.pm b/lib/File/KDBX/Entry.pm index 5e666bb..bc81d5a 100644 --- a/lib/File/KDBX/Entry.pm +++ b/lib/File/KDBX/Entry.pm @@ -11,8 +11,8 @@ use File::KDBX::Constants qw(:history :icon); use File::KDBX::Error; use File::KDBX::Util qw(:function :uri generate_uuid load_optional); use Hash::Util::FieldHash; -use List::Util qw(sum0); -use Ref::Util qw(is_plain_hashref); +use List::Util qw(first sum0); +use Ref::Util qw(is_coderef is_plain_hashref); use Scalar::Util qw(looks_like_number); use Storable qw(dclone); use Time::Piece; @@ -170,9 +170,7 @@ sub uuid { for my $entry (@{$self->history}) { $entry->{uuid} = $uuid; } - # if (defined $old_uuid and my $kdbx = $KDBX{$self}) { - # $kdbx->_update_entry_uuid($old_uuid, $uuid, $self); - # } + $self->_signal('uuid.changed', $uuid, $old_uuid) if defined $old_uuid && $self->is_current; } $self->{uuid}; } @@ -180,7 +178,7 @@ sub uuid { my @ATTRS = qw(uuid custom_data history); my %ATTRS = ( # uuid => sub { generate_uuid(printable => 1) }, - icon_id => ICON_PASSWORD, + icon_id => sub { defined $_[1] ? 
icon($_[1]) : ICON_PASSWORD }, custom_icon_uuid => undef, foreground_color => '', background_color => '', @@ -195,13 +193,13 @@ my %ATTRS = ( # history => sub { +[] }, ); my %ATTRS_TIMES = ( - last_modification_time => sub { gmtime }, - creation_time => sub { gmtime }, - last_access_time => sub { gmtime }, - expiry_time => sub { gmtime }, + last_modification_time => sub { scalar gmtime }, + creation_time => sub { scalar gmtime }, + last_access_time => sub { scalar gmtime }, + expiry_time => sub { scalar gmtime }, expires => false, usage_count => 0, - location_changed => sub { gmtime }, + location_changed => sub { scalar gmtime }, ); my %ATTRS_STRINGS = ( title => 'Title', @@ -211,12 +209,16 @@ my %ATTRS_STRINGS = ( notes => 'Notes', ); -while (my ($attr, $default) = each %ATTRS) { +while (my ($attr, $setter) = each %ATTRS) { no strict 'refs'; ## no critic (ProhibitNoStrict) - *{$attr} = sub { + *{$attr} = is_coderef $setter ? sub { + my $self = shift; + $self->{$attr} = $setter->($self, shift) if @_; + $self->{$attr} //= $setter->($self); + } : sub { my $self = shift; $self->{$attr} = shift if @_; - $self->{$attr} //= (ref $default eq 'CODE') ? $default->($self) : $default; + $self->{$attr} //= $setter; }; } while (my ($attr, $default) = each %ATTRS_TIMES) { @@ -798,25 +800,83 @@ sub prune_history { } } -sub add_history { +=method add_historical_entry + + $entry->add_historical_entry($entry); + +Add an entry to the history. + +=cut + +sub add_historical_entry { my $self = shift; delete $_->{history} for @_; push @{$self->{history} //= []}, map { $self->_wrap_entry($_) } @_; } +=method current_entry + + $current_entry = $entry->current_entry; + +Get an entry's current entry. If the entry itself is current (not historical), itself is returned. 
+ +=cut + +sub current_entry { + my $self = shift; + my $group = $self->parent; + + if ($group) { + my $id = $self->uuid; + my $entry = first { $id eq $_->uuid } @{$group->entries}; + return $entry if $entry; + } + + return $self; +} + +=method is_current + + $bool = $entry->is_current; + +Get whether or not an entry is considered current (i.e. not historical). An entry is current if it is directly +in the parent group's entry list. + +=cut + +sub is_current { + my $self = shift; + my $current = $self->current_entry; + return Hash::Util::FieldHash::id($self) == Hash::Util::FieldHash::id($current); +} + +=method is_historical + + $bool = $entry->is_historical; + +Get whether or not an entry is considered historical (i.e. not current). + +This is just the inverse of L. + +=cut + +sub is_historical { !$_[0]->is_current } + ############################################################################## -sub begin_work { +sub _signal { my $self = shift; - require File::KDBX::Transaction; - return File::KDBX::Transaction->new($self, @_); + my $type = shift; + return $self->SUPER::_signal("entry.$type", @_); } sub _commit { my $self = shift; - my $txn = shift; - $self->add_history($txn->original); - $self->last_modification_time(gmtime); + my $orig = shift; + $self->add_historical_entry($orig); + my $time = gmtime; + $self->last_modification_time($time); + $self->last_access_time($time); } sub label { shift->expanded_title(@_) } @@ -844,6 +904,8 @@ There is also some metadata associated with an entry. Each entry in a database i a UUID. An entry can also have an icon associated with it, and there are various timestamps. Take a look at the attributes to see what's available. +A B is a subclass of L. 
+ =head2 Placeholders Entry string and auto-type key sequences can have placeholders or template tags that can be replaced by other diff --git a/lib/File/KDBX/Group.pm b/lib/File/KDBX/Group.pm index 652d3aa..d68189f 100644 --- a/lib/File/KDBX/Group.pm +++ b/lib/File/KDBX/Group.pm @@ -10,7 +10,7 @@ use File::KDBX::Error; use File::KDBX::Util qw(generate_uuid); use Hash::Util::FieldHash; use List::Util qw(sum0); -use Ref::Util qw(is_ref); +use Ref::Util qw(is_coderef is_ref); use Scalar::Util qw(blessed); use Time::Piece; use boolean; @@ -28,7 +28,7 @@ my %ATTRS = ( name => '', notes => '', tags => '', - icon_id => ICON_FOLDER, + icon_id => sub { defined $_[1] ? icon($_[1]) : ICON_FOLDER }, custom_icon_uuid => undef, is_expanded => false, default_auto_type_sequence => '', @@ -41,21 +41,25 @@ my %ATTRS = ( # groups => sub { +[] }, ); my %ATTRS_TIMES = ( - last_modification_time => sub { gmtime }, - creation_time => sub { gmtime }, - last_access_time => sub { gmtime }, - expiry_time => sub { gmtime }, + last_modification_time => sub { scalar gmtime }, + creation_time => sub { scalar gmtime }, + last_access_time => sub { scalar gmtime }, + expiry_time => sub { scalar gmtime }, expires => false, usage_count => 0, - location_changed => sub { gmtime }, + location_changed => sub { scalar gmtime }, ); -while (my ($attr, $default) = each %ATTRS) { +while (my ($attr, $setter) = each %ATTRS) { no strict 'refs'; ## no critic (ProhibitNoStrict) - *{$attr} = sub { + *{$attr} = is_coderef $setter ? sub { + my $self = shift; + $self->{$attr} = $setter->($self, shift) if @_; + $self->{$attr} //= $setter->($self); + } : sub { my $self = shift; $self->{$attr} = shift if @_; - $self->{$attr} //= (ref $default eq 'CODE') ? $default->($self) : $default; + $self->{$attr} //= $setter; }; } while (my ($attr, $default) = each %ATTRS_TIMES) { @@ -78,9 +82,7 @@ sub uuid { my %args = @_ % 2 == 1 ? 
(uuid => shift, @_) : @_; my $old_uuid = $self->{uuid}; my $uuid = $self->{uuid} = delete $args{uuid} // generate_uuid; - # if (defined $old_uuid and my $kdbx = $KDBX{$self}) { - # $kdbx->_update_group_uuid($old_uuid, $uuid, $self); - # } + $self->_signal('uuid.changed', $uuid, $old_uuid) if defined $old_uuid; } $self->{uuid}; } @@ -307,6 +309,19 @@ sub depth { $_[0]->is_root ? 0 : (scalar @{$_[0]->lineage || []} || -1) } sub label { shift->name(@_) } +sub _signal { + my $self = shift; + my $type = shift; + return $self->SUPER::_signal("group.$type", @_); +} + +sub _commit { + my $self = shift; + my $time = gmtime; + $self->last_modification_time($time); + $self->last_access_time($time); +} + 1; __END__ diff --git a/lib/File/KDBX/Loader.pm b/lib/File/KDBX/Loader.pm index 9f51321..ff44832 100644 --- a/lib/File/KDBX/Loader.pm +++ b/lib/File/KDBX/Loader.pm @@ -212,11 +212,32 @@ sub kdbx { =attr format -TODO +Get the file format used for reading the database. Normally the format is auto-detected from the data stream. +This auto-detection works well, so there's not really a good reason to explicitly specify the format. +Possible formats: + +=for :list +* C +* C +* C +* C +* C =cut sub format { $_[0]->{format} } + +=attr inner_format + +Get the format of the data inside the KDBX envelope. This only applies to C and C formats. 
Possible +formats: + +=for :list +* C - Read the database groups and entries as XML (default) +* C - Read parsing and store the result in L + +=cut + sub inner_format { $_[0]->{inner_format} // 'XML' } =attr min_version @@ -346,3 +367,9 @@ sub _read_inner_body { } 1; +__END__ + +=head1 DESCRIPTION + + +=cut diff --git a/lib/File/KDBX/Loader/XML.pm b/lib/File/KDBX/Loader/XML.pm index 43dd82a..806b261 100644 --- a/lib/File/KDBX/Loader/XML.pm +++ b/lib/File/KDBX/Loader/XML.pm @@ -37,8 +37,6 @@ sub _read_inner_body { my $self = shift; my $fh = shift; - # print do { local $/; <$fh> }; - # exit; my $reader = $self->{_reader} = XML::LibXML::Reader->new(IO => $fh); delete $self->{safe}; diff --git a/lib/File/KDBX/Object.pm b/lib/File/KDBX/Object.pm index 9cc33ca..afede78 100644 --- a/lib/File/KDBX/Object.pm +++ b/lib/File/KDBX/Object.pm @@ -8,13 +8,14 @@ use Devel::GlobalDestruction; use File::KDBX::Error; use File::KDBX::Util qw(:uuid); use Hash::Util::FieldHash qw(fieldhashes); -use Ref::Util qw(is_arrayref is_plain_hashref is_ref); +use List::Util qw(first); +use Ref::Util qw(is_arrayref is_plain_arrayref is_plain_hashref is_ref); use Scalar::Util qw(blessed weaken); use namespace::clean; our $VERSION = '999.999'; # VERSION -fieldhashes \my (%KDBX, %PARENT); +fieldhashes \my (%KDBX, %PARENT, %TXNS, %REFS, %SIGNALS); =method new @@ -210,9 +211,10 @@ sub STORABLE_thaw { local $CLONE{history} = 1; local $CLONE{reference_password} = 0; local $CLONE{reference_username} = 0; + # Clone only the entry's data and manually bless to avoid infinite recursion. bless Storable::dclone({%$copy}), 'File::KDBX::Entry'; }; - my $txn = $self->begin_work($clone_obj); + my $txn = $self->begin_work(snapshot => $clone_obj); if ($CLONE{reference_password}) { $self->password("{REF:P\@I:$uuid}"); } @@ -223,6 +225,9 @@ sub STORABLE_thaw { } $self->uuid(generate_uuid) if $CLONE{new_uuid}; } + + # Dualvars aren't cloned as dualvars, so dualify the icon. 
+ $self->icon_id($self->{icon_id}) if defined $self->{icon_id}; } =attr kdbx @@ -279,12 +284,10 @@ Get the parent group to which an object belongs or C if it belongs to no sub group { my $self = shift; - my $addr = Hash::Util::FieldHash::id($self); + my $id = Hash::Util::FieldHash::id($self); if (my $group = $PARENT{$self}) { my $method = $self->_parent_container; - for my $object (@{$group->$method}) { - return $group if $addr == Hash::Util::FieldHash::id($object); - } + return $group if first { $id == Hash::Util::FieldHash::id($_) } @{$group->$method}; delete $PARENT{$self}; } # always get lineage from root to leaf because the other way requires parent, so it would be recursive @@ -464,6 +467,216 @@ sub custom_data_value { return $data->{value}; } +############################################################################## + +sub _signal { + my $self = shift; + my $type = shift; + + if ($self->_in_txn) { + my $stack = $self->_signal_stack; + my $queue = $stack->[-1]; + push @$queue, [$type, @_]; + } + + $self->_signal_send([[$type, @_]]); +} + +sub _signal_stack { $SIGNALS{$_[0]} //= [] } + +sub _signal_begin_work { + my $self = shift; + push @{$self->_signal_stack}, []; +} + +sub _signal_commit { + my $self = shift; + my $signals = pop @{$self->_signal_stack}; + my $previous = $self->_signal_stack->[-1] // []; + push @$previous, @$signals; + return $previous; +} + +sub _signal_rollback { + my $self = shift; + pop @{$self->_signal_stack}; +} + +sub _signal_send { + my $self = shift; + my $signals = shift // []; + + my $kdbx = $KDBX{$self} or return; + + # de-duplicate, keeping the most recent signal for each type + my %seen; + my @signals = grep { !$seen{$_->[0]}++ } reverse @$signals; + + for my $sig (reverse @signals) { + $kdbx->_handle_signal($self, @$sig); + } +} + +############################################################################## + +=method begin_work + + $txn = $object->begin_work(%options); + $object->begin_work(%options); + +Begin a new 
transaction. Returns a L object that can be scoped to ensure a rollback
+occurs if exceptions are thrown. Alternatively, if called in void context, there will be no
+B and it is instead your responsibility to call L or L as
+appropriate. It is undefined behavior to call these if a B exists. Recursive
+transactions are allowed.
+
+Signals created during a transaction are delayed until all transactions are resolved. If the outermost
+transaction is committed, then the signals are de-duplicated and delivered. Otherwise the signals are dropped.
+This means that the KDBX database will not fix broken references or mark itself dirty until after the
+transaction is committed.
+
+How it works: With the beginning of a transaction, a snapshot of the object is created. In the event of
+a rollback, the object's data is replaced with data from the snapshot.
+
+By default, the snapshot is shallow (i.e. does not include subgroups, entries or historical entries). This
+means that only modifications to the object itself (its data, fields, strings, etc.) are atomic; modifications
+to subgroups etc., including adding or removing items, are auto-committed instantly and will persist regardless
+of the result of the pending transaction. You can override this for groups, entries and history independently
+using options:
+
+=for :list
+* C - If set, snapshot entries within a group, deeply (default: false)
+* C - If set, snapshot subgroups within a group, deeply (default: false)
+* C - If set, snapshot historical entries within an entry (default: false)
+
+For example, if you begin a transaction on a group object using the C option, like this:
+
+    $group->begin_work(entries => 1);
+
+Then if you modify any of the group's entries OR add new entries OR delete entries, all of that will be undone
+if the transaction is rolled back. With a default-configured transaction, however, changes to entries are kept
+even if the transaction is rolled back.
+ +=cut + +sub begin_work { + my $self = shift; + + if (defined wantarray) { + require File::KDBX::Transaction; + return File::KDBX::Transaction->new($self, @_); + } + + my %args = @_; + my $orig = $args{snapshot} // do { + my $c = $self->clone( + entries => $args{entries} // 0, + groups => $args{groups} // 0, + history => $args{history} // 0, + ); + $c->{entries} = $self->{entries} if !$args{entries}; + $c->{groups} = $self->{groups} if !$args{groups}; + $c->{history} = $self->{history} if !$args{history}; + $c; + }; + + my $id = Hash::Util::FieldHash::id($orig); + _save_references($id, $self, $orig); + + $self->_signal_begin_work; + + push @{$self->_txns}, $orig; +} + +=method commit + + $object->commit; + +Commit a transaction, making updates to C<$object> permanent. Returns itself to allow method chaining. + +=cut + +sub commit { + my $self = shift; + my $orig = pop @{$self->_txns} or return $self; + $self->_commit($orig); + my $signals = $self->_signal_commit; + $self->_signal_send($signals) if !$self->_in_txn; + return $self; +} + +sub _commit { die 'Not implemented' } +sub _in_txn { scalar @{$_[0]->_txns} } +sub _txns { $TXNS{$_[0]} //= [] } + +=method rollback + + $object->rollback; + +Roll back the most recent transaction, throwing away any updates to the L made since the transaction +began. Returns itself to allow method chaining. 
+ +=cut + +sub rollback { + my $self = shift; + + my $orig = pop @{$self->_txns} or return $self; + + my $id = Hash::Util::FieldHash::id($orig); + _restore_references($id, $orig); + + $self->_signal_rollback; + + return $self; +} + +sub _save_references { + my $id = shift; + my $self = shift; + my $orig = shift; + + if (is_plain_arrayref($orig)) { + for (my $i = 0; $i < @$orig; ++$i) { + _save_references($id, $self->[$i], $orig->[$i]); + } + $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self; + } + elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) { + for my $key (keys %$orig) { + _save_references($id, $self->{$key}, $orig->{$key}); + } + $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self; + } +} + +sub _restore_references { + my $id = shift; + my $orig = shift // return; + my $self = delete $REFS{$id}{Hash::Util::FieldHash::id($orig) // ''} // return $orig; + + if (is_plain_arrayref($orig)) { + @$self = map { _restore_references($id, $_) } @$orig; + } + elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) { + for my $key (keys %$orig) { + # next if is_ref($orig->{$key}) && + # (Hash::Util::FieldHash::id($self->{$key}) // 0) == Hash::Util::FieldHash::id($orig->{$key}); + $self->{$key} = _restore_references($id, $orig->{$key}); + } + } + + return $self; +} + +sub _confirmed { + my $self = shift; + my ($orig) = @{$self->_txns}; + return $orig // $self; +} + +############################################################################## + sub _wrap_group { my $self = shift; my $group = shift; @@ -496,4 +709,40 @@ but instead use its subclasses: There is some functionality shared by both types of objects, and that's what this class provides. +Each object can be associated with a L database or be disassociated. A disassociated object will +not be persisted when dumping a database. It is also possible for an object to be associated with a database +but not be part of the object tree (i.e. 
is not the root group or any subgroup or entry). A disassociated
+object or an object not part of the object tree of a database can be added to a database using one of:
+
+=for :list
+* L
+* L
+* L
+* L
+* L
+
+It is possible to copy or move objects between databases, but you B<must not> include the same object in more
+than one database at once or there could be some strange aliasing effects (i.e. changes in one database might
+affect another in unexpected ways). This could lead to difficult-to-debug problems. It is similarly not safe
+or valid to add the same object multiple times to the same database. For example:
+
+    my $entry = File::KDBX::Entry->new(title => 'Whatever');
+
+    # DO NOT DO THIS:
+    $kdbx->add_entry($entry);
+    $another_kdbx->add_entry($entry);
+
+    # DO NOT DO THIS:
+    $kdbx->add_entry($entry);
+    $kdbx->add_entry($entry); # again
+
+Instead, do this:
+
+    # Copy an entry to multiple databases:
+    $kdbx->add_entry($entry);
+    $another_kdbx->add_entry($entry->clone);
+
+    # OR move an existing entry from one database to another:
+    $kdbx->add_entry($entry->remove);
+
 =cut
diff --git a/lib/File/KDBX/Transaction.pm b/lib/File/KDBX/Transaction.pm
index 10e8b3f..0ed48b2 100644
--- a/lib/File/KDBX/Transaction.pm
+++ b/lib/File/KDBX/Transaction.pm
@@ -9,38 +9,65 @@ use namespace::clean;
 
 our $VERSION = '999.999'; # VERSION
 
+=method new
+
+    $txn = File::KDBX::Transaction->new($object);
+
+Construct a new database transaction for editing an object atomically.
+
+=cut
+
 sub new {
-    my $class = shift;
-    my $object = shift;
-    my $orig = shift // $object->clone;
-    return bless {object => $object, original => $orig}, $class;
+    my $class = shift;
+    my $object = shift;
+    $object->begin_work(@_);
+    return bless {object => $object}, $class;
 }
 
 sub DESTROY { !in_global_destruction and $_[0]->rollback }
 
-sub object { $_[0]->{object} }
-sub original { $_[0]->{original} }
+=attr object
+
+Get the object being transacted on.
+ +=cut + +sub object { $_[0]->{object} } + +=method commit + + $txn->commit; + +Commit the transaction, making updates to the L permanent. + +=cut sub commit { my $self = shift; + return if $self->{done}; + my $obj = $self->object; - if (my $commit = $obj->can('_commit')) { - $commit->($obj, $self); - } - $self->{committed} = 1; + $obj->commit; + $self->{done} = 1; return $obj; } +=method rollback + + $txn->rollback; + +Roll back the transaction, throwing away any updates to the L made since the transaction began. This +happens automatically when the transaction is released, unless it has already been committed. + +=cut + sub rollback { my $self = shift; - return if $self->{committed}; + return if $self->{done}; my $obj = $self->object; - my $orig = $self->original; - - %$obj = (); - @$obj{keys %$orig} = values %$orig; - + $obj->rollback; + $self->{done} = 1; return $obj; } diff --git a/lib/File/KDBX/Util.pm b/lib/File/KDBX/Util.pm index 630b181..3355d41 100644 --- a/lib/File/KDBX/Util.pm +++ b/lib/File/KDBX/Util.pm @@ -29,7 +29,7 @@ our %EXPORT_TAGS = ( gzip => [qw(gzip gunzip)], io => [qw(is_readable is_writable read_all)], load => [qw(load_optional load_xs try_load_optional)], - search => [qw(query search simple_expression_query)], + search => [qw(query search search_limited simple_expression_query)], text => [qw(snakify trim)], uuid => [qw(format_uuid generate_uuid is_uuid uuid)], uri => [qw(split_url uri_escape_utf8 uri_unescape_utf8)], @@ -348,6 +348,7 @@ See L. =cut sub erase_scoped { + throw 'Programmer error: Cannot call erase_scoped in void context' if !defined wantarray; my @args; for (@_) { !is_ref($_) || is_arrayref($_) || is_hashref($_) || is_scalarref($_) @@ -618,7 +619,6 @@ This is the search engine described with many examples at L. 
sub search { my $list = shift; my $query = shift; - # my %args = @_; if (is_coderef($query) && !@_) { # already a query @@ -630,12 +630,32 @@ sub search { $query = query($query, @_); } - # my $limit = $args{limit}; + my @match; + for my $item (@$list) { + push @match, $item if $query->($item); + } + return \@match; +} + +sub search_limited { + my $list = shift; + my $query = shift; + my $limit = shift // 1; + + if (is_coderef($query) && !@_) { + # already a query + } + elsif (is_scalarref($query)) { + $query = simple_expression_query($$query, @_); + } + else { + $query = query($query, @_); + } my @match; for my $item (@$list) { push @match, $item if $query->($item); - # last if defined $limit && $limit <= @match; + last if $limit <= @match; } return \@match; } diff --git a/t/entry.t b/t/entry.t index 9171eb4..6de8028 100644 --- a/t/entry.t +++ b/t/entry.t @@ -80,11 +80,11 @@ subtest 'Custom icons' => sub { my $entry = File::KDBX::Entry->new(my $kdbx = File::KDBX->new, icon_id => 42); is $entry->custom_icon_uuid, undef, 'UUID is undef if no custom icon is set'; is $entry->custom_icon, undef, 'Icon is undef if no custom icon is set'; - is $entry->icon_id, 42, 'Default icon is set to something'; + is $entry->icon_id, 'KCMMemory', 'Default icon is set to something'; is $entry->custom_icon($gif), $gif, 'Setting a custom icon returns icon'; is $entry->custom_icon, $gif, 'Henceforth the icon is set'; - is $entry->icon_id, 0, 'Default icon got changed to first icon'; + is $entry->icon_id, 'Password', 'Default icon got changed to first icon'; my $uuid = $entry->custom_icon_uuid; isnt $uuid, undef, 'UUID is now set'; @@ -96,4 +96,39 @@ subtest 'Custom icons' => sub { is $found, $gif, 'Custom icon still exists in the database'; }; +subtest 'History' => sub { + my $kdbx = File::KDBX->new; + my $entry = $kdbx->add_entry(label => 'Foo'); + is scalar @{$entry->history}, 0, 'New entry starts with no history'; + is $entry->current_entry, $entry, 'Current new entry is itself'; + ok 
$entry->is_current, 'New entry is current'; + + my $txn = $entry->begin_work; + $entry->notes('Hello!'); + $txn->commit; + is scalar @{$entry->history}, 1, 'Committing creates a historical entry'; + ok $entry->is_current, 'New entry is still current'; + ok $entry->history->[0]->is_historical, 'Historical entry is not current'; + is $entry->notes, 'Hello!', 'New entry is modified after commit'; + is $entry->history->[0]->notes, '', 'Historical entry is saved without modification'; +}; + +subtest 'Update UUID' => sub { + my $kdbx = File::KDBX->new; + + my $entry1 = $kdbx->add_entry(label => 'Foo'); + my $entry2 = $kdbx->add_entry(label => 'Bar'); + + $entry2->url(sprintf('{REF:T@I:%s} {REF:T@I:%s}', $entry1->id, lc($entry1->id))); + is $entry2->expanded_url, 'Foo Foo', 'Field reference expands' + or diag explain $entry2->url; + + $entry1->uuid("\1" x 16); + + is $entry2->url, '{REF:T@I:01010101010101010101010101010101} {REF:T@I:01010101010101010101010101010101}', + 'Replace field references when an entry UUID is changed'; + is $entry2->expanded_url, 'Foo Foo', 'Field reference expands after UUID is changed' + or diag explain $entry2->url; +}; + done_testing; diff --git a/t/kdb.t b/t/kdb.t index ab4fea4..6e1cda6 100644 --- a/t/kdb.t +++ b/t/kdb.t @@ -103,7 +103,7 @@ sub test_custom_icons { for my $test ( ['Custom icons' => $kdbx], ['Custom icons after dump & load roundtrip' - => File::KDBX->load_string($kdbx->dump_string('a', upgrade => 0, randomize_seeds => 0), 'a')], + => File::KDBX->load_string($kdbx->dump_string('a', allow_upgrade => 0, randomize_seeds => 0), 'a')], ) { my ($name, $kdbx) = @$test; subtest $name, \&test_custom_icons, $kdbx; diff --git a/t/object.t b/t/object.t index 749066d..ff46cf8 100644 --- a/t/object.t +++ b/t/object.t @@ -32,7 +32,7 @@ subtest 'Cloning' => sub { $txn->commit; $copy = $entry->clone; - is @{$copy->history}, 1, 'Copy has a historical entry'; + is @{$copy->history}, 1, 'Copy has a historical entry' or dumper $copy->history; 
cmp_deeply $copy, $entry, 'Entry with history and its clone are identical'; $copy = $entry->clone(history => 0); @@ -88,4 +88,91 @@ subtest 'Cloning' => sub { 'First entry in group and its copy are different'; }; +subtest 'Transactions' => sub { + my $kdbx = File::KDBX->new; + + my $root = $kdbx->root; + my $entry = $kdbx->add_entry( + label => 'One', + last_modification_time => Time::Piece->strptime('2022-04-20', '%Y-%m-%d'), + username => 'Fred', + ); + + my $txn = $root->begin_work; + $root->label('Toor'); + $root->notes(''); + $txn->commit; + is $root->label, 'Toor', 'Retain change to root label after commit'; + + $root->begin_work; + $root->label('Root'); + $entry->label('Zap'); + $root->rollback; + is $root->label, 'Toor', 'Undo change to root label after rollback'; + is $entry->label, 'Zap', 'Retain change to entry after rollback'; + + $txn = $root->begin_work(entries => 1); + $root->label('Root'); + $entry->label('Zippy'); + undef $txn; # implicit rollback + is $root->label, 'Toor', 'Undo change to root label after implicit rollback'; + is $entry->label, 'Zap', 'Undo change to entry after rollback with deep transaction'; + + $txn = $entry->begin_work; + my $mtime = $entry->last_modification_time; + my $username = $entry->string('UserName'); + $username->{meh} = 'hi'; + $entry->username('jinx'); + $txn->rollback; + is $entry->string('UserName'), $username, 'Rollback keeps original references'; + is $entry->last_modification_time, $mtime, 'No last modification time change after rollback'; + + $txn = $entry->begin_work; + $entry->username('jinx'); + $txn->commit; + isnt $entry->last_modification_time, $mtime, 'Last modification time changes after commit'; + + { + my $txn1 = $root->begin_work; + $root->label('alien'); + { + my $txn2 = $root->begin_work; + $root->label('truth'); + $txn2->commit; + } + } + is $root->label, 'Toor', 'Changes thrown away after rolling back outer transaction'; + + { + my $txn1 = $root->begin_work; + $root->label('alien'); + { + my 
$txn2 = $root->begin_work; + $root->label('truth'); + } + $txn1->commit; + } + is $root->label, 'alien', 'Keep committed change after rolling back inner transaction'; + + { + my $txn1 = $root->begin_work; + $root->label('alien'); + { + my $txn2 = $root->begin_work; + $root->label('truth'); + $txn2->commit; + } + $txn1->commit; + } + is $root->label, 'truth', 'Keep committed change from inner transaction'; + + $txn = $root->begin_work; + $root->label('Lalala'); + my $dump = $kdbx->dump_string('a'); + $txn->commit; + is $root->label, 'Lalala', 'Keep committed label change after dump'; + my $load = File::KDBX->load_string($dump, 'a'); + is $load->root->label, 'truth', 'Object dumped before committing matches the pre-transaction state'; +}; + done_testing; -- 2.43.0