use File::KDBX::Constants qw(:all);
use File::KDBX::Error;
use File::KDBX::Safe;
-use File::KDBX::Util qw(:empty erase generate_uuid search simple_expression_query snakify);
+use File::KDBX::Util qw(:empty :uuid :search erase simple_expression_query snakify);
use Hash::Util::FieldHash qw(fieldhashes);
use List::Util qw(any);
use Ref::Util qw(is_ref is_arrayref is_plain_hashref);
__PACKAGE__, $VERSION, @Config::Config{qw(package version osname osvers archname)});
}
-=attr sig1
-
-=attr sig2
-
-=attr version
-
-=attr headers
-
-=attr inner_headers
-
-=attr meta
-
-=attr binaries
-
-=attr deleted_objects
-
-=attr raw
-
- $value = $kdbx->$attr;
- $kdbx->$attr($value);
-
-Get and set attributes.
-
-=cut
-
my %ATTRS = (
sig1 => KDBX_SIG1,
sig2 => KDBX_SIG2_2,
generator => '',
header_hash => '',
database_name => '',
- database_name_changed => sub { gmtime },
+ database_name_changed => sub { scalar gmtime },
database_description => '',
- database_description_changed => sub { gmtime },
+ database_description_changed => sub { scalar gmtime },
default_username => '',
- default_username_changed => sub { gmtime },
+ default_username_changed => sub { scalar gmtime },
maintenance_history_days => 0,
color => '',
- master_key_changed => sub { gmtime },
+ master_key_changed => sub { scalar gmtime },
master_key_change_rec => -1,
master_key_change_force => -1,
# memory_protection => sub { +{} },
custom_icons => sub { +{} },
recycle_bin_enabled => true,
recycle_bin_uuid => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
- recycle_bin_changed => sub { gmtime },
+ recycle_bin_changed => sub { scalar gmtime },
entry_templates_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
- entry_templates_group_changed => sub { gmtime },
+ entry_templates_group_changed => sub { scalar gmtime },
last_selected_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
last_top_visible_group => "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0",
history_max_items => HISTORY_DEFAULT_MAX_ITEMS,
history_max_size => HISTORY_DEFAULT_MAX_SIZE,
- settings_changed => sub { gmtime },
+ settings_changed => sub { scalar gmtime },
# binaries => sub { +{} },
# custom_data => sub { +{} },
);
protect_password => true,
protect_url => false,
protect_notes => false,
- auto_enable_visual_hiding => false,
+ # auto_enable_visual_hiding => false,
);
-sub _update_group_uuid {
- my $self = shift;
- my $old_uuid = shift // return;
- my $new_uuid = shift;
-
- my $meta = $self->meta;
- $self->recycle_bin_uuid($new_uuid) if $old_uuid eq ($meta->{recycle_bin_uuid} // '');
- $self->entry_templates_group($new_uuid) if $old_uuid eq ($meta->{entry_templates_group} // '');
- $self->last_selected_group($new_uuid) if $old_uuid eq ($meta->{last_selected_group} // '');
- $self->last_top_visible_group($new_uuid) if $old_uuid eq ($meta->{last_top_visible_group} // '');
-
- for my $group (@{$self->all_groups}) {
- $group->last_top_visible_entry($new_uuid) if $old_uuid eq ($group->{last_top_visible_entry} // '');
- $group->previous_parent_group($new_uuid) if $old_uuid eq ($group->{previous_parent_group} // '');
- }
- for my $entry (@{$self->all_entries}) {
- $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // '');
- }
-}
-
-sub _update_entry_uuid {
- my $self = shift;
- my $old_uuid = shift // return;
- my $new_uuid = shift;
-
- for my $entry (@{$self->all_entries}) {
- $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // '');
- }
-}
-
while (my ($attr, $default) = each %ATTRS) {
no strict 'refs'; ## no critic (ProhibitNoStrict)
*{$attr} = sub {
When reading such files, a single implicit root group is created to contain the other explicit groups. When
writing to such a format, if the root group looks like it was implicitly created then it won't be written and
the resulting file might have multiple root groups. This allows working with older files without changing
-their written internal structure while still adhering to the modern restrictions while the database is opened.
+their written internal structure while still adhering to modern semantics while the database is opened.
B<WARNING:> The root group of a KDBX database contains all of the database's entries and other groups. If you
replace the root group, you are essentially replacing the entire database contents with something else.
search => $args{search},
history => $args{history},
);
- return @{search($self->all_entries(%all_entries), is_arrayref($query) ? @$query : $query)};
+ my $limit = delete $args{limit};
+ if (defined $limit) {
+ return @{search_limited($self->all_entries(%all_entries), is_arrayref($query) ? @$query : $query, $limit)};
+ }
+ else {
+ return @{search($self->all_entries(%all_entries), is_arrayref($query) ? @$query : $query)};
+ }
}
sub find_entries_simple {
P => 'expanded_password',
A => 'expanded_url',
N => 'expanded_notes',
- I => 'id',
+ I => 'uuid',
O => 'other_strings',
);
$wanted = $fields{$wanted} or return;
$search_in = $fields{$search_in} or return;
- my $query = simple_expression_query($text, ($search_in eq 'id' ? 'eq' : '=~'), $search_in);
+ my $query = $search_in eq 'uuid' ? query($search_in => uuid($text))
+ : simple_expression_query($text, '=~', $search_in);
- my ($entry) = $self->find_entries($query);
+ my ($entry) = $self->find_entries($query, limit => 1);
$entry or return;
return $entry->$wanted;
=cut
-sub peek {
- my $self = shift;
- my $string = shift;
- my $safe = $self->_safe or return;
- return $safe->peek($string);
-}
-
sub unlock {
my $self = shift;
my $safe = $self->_safe or return $self;
return $self;
}
-# sub unlock_scoped {
-# my $self = shift;
-# return if !$self->is_locked;
-# require Scope::Guard;
-# my $guard = Scope::Guard->new(sub { $self->lock });
-# $self->unlock;
-# return $guard;
-# }
+=method unlock_scoped
+
+ $guard = $kdbx->unlock_scoped;
+
+Unlock a database temporarily, relocking when the guard is released (typically at the end of a scope). Returns
+C<undef> if the database is already unlocked.
+
+See L</lock> and L</unlock>.
+
+=cut
+
+sub unlock_scoped {
+ # A guard discarded immediately would re-lock at once, so calling in void context is a programmer error.
+ throw 'Programmer error: Cannot call unlock_scoped in void context' if !defined wantarray;
+ my $self = shift;
+ return if !$self->is_locked;
+ require Scope::Guard;
+ my $guard = Scope::Guard->new(sub { $self->lock });
+ $self->unlock;
+ return $guard;   # database re-locks when the guard is released
+}
+
+=method peek
+
+ $string = $kdbx->peek(\%string);
+ $string = $kdbx->peek(\%binary);
+
+Peek at the value of a protected string or binary without unlocking the whole database. The argument can be
+a string or binary hashref as returned by L<File::KDBX::Entry/string> or L<File::KDBX::Entry/binary>.
+
+=cut
+
+sub peek {
+ my $self = shift;
+ my $string = shift;   # string or binary hashref whose protected value is held in the safe
+ # No safe means nothing is currently protected, so there is nothing to peek at.
+ my $safe = $self->_safe or return;
+ return $safe->peek($string);
+}
=method is_locked
#########################################################################################
+# Route a signal raised by a contained object (entry or group) to the matching
+# database-level handler. Unrecognized signal types are silently ignored.
+sub _handle_signal {
+ my $self = shift;
+ my $object = shift;   # the object that raised the signal
+ my $type = shift;     # signal type string, e.g. 'entry.uuid.changed'
+
+ my %handlers = (
+ 'entry.uuid.changed' => \&_update_entry_uuid,
+ 'group.uuid.changed' => \&_update_group_uuid,
+ );
+ my $handler = $handlers{$type} or return;
+ $self->$handler($object, @_);
+}
+
+# Fix up every stored reference to a group's UUID after that UUID changes, so
+# meta fields and per-object pointers keep referring to the same group.
+sub _update_group_uuid {
+ my $self = shift;
+ my $object = shift;
+ my $new_uuid = shift;
+ my $old_uuid = shift // return;   # no previous UUID means nothing can reference it
+
+ my $meta = $self->meta;
+ $self->recycle_bin_uuid($new_uuid) if $old_uuid eq ($meta->{recycle_bin_uuid} // '');
+ $self->entry_templates_group($new_uuid) if $old_uuid eq ($meta->{entry_templates_group} // '');
+ $self->last_selected_group($new_uuid) if $old_uuid eq ($meta->{last_selected_group} // '');
+ $self->last_top_visible_group($new_uuid) if $old_uuid eq ($meta->{last_top_visible_group} // '');
+
+ for my $group (@{$self->all_groups}) {
+ $group->last_top_visible_entry($new_uuid) if $old_uuid eq ($group->{last_top_visible_entry} // '');
+ $group->previous_parent_group($new_uuid) if $old_uuid eq ($group->{previous_parent_group} // '');
+ }
+ for my $entry (@{$self->all_entries}) {
+ $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // '');
+ }
+}
+
+# Fix up references to an entry's UUID after it changes: previous-parent group
+# pointers and {REF:X@I:<uuid>} field references embedded in other entries' strings.
+sub _update_entry_uuid {
+ my $self = shift;
+ my $object = shift;
+ my $new_uuid = shift;
+ my $old_uuid = shift // return;   # no previous UUID means nothing can reference it
+
+ my $old_pretty = format_uuid($old_uuid);
+ my $new_pretty = format_uuid($new_uuid);
+ # Matches field references like {REF:P@I:<hex uuid>}, capturing the field letter.
+ my $fieldref_match = qr/\{REF:([TUPANI])\@I:\Q$old_pretty\E\}/is;
+
+ for my $entry (@{$self->all_entries}) {
+ $entry->previous_parent_group($new_uuid) if $old_uuid eq ($entry->{previous_parent_group} // '');
+
+ for my $string (values %{$entry->strings}) {
+ next if !defined $string->{value} || $string->{value} !~ $fieldref_match;
+ # Rewrite within a transaction so the edit is committed like any other change.
+ my $txn = $entry->begin_work;
+ $string->{value} =~ s/$fieldref_match/{REF:$1\@I:$new_pretty}/g;
+ $txn->commit;
+ }
+ }
+}
+
+#########################################################################################
+
=attr comment
A text string associated with the database. Often unset.
L<File::KeePass> is a much older alternative. It's good but has a backlog of bugs and lacks support for newer
KDBX features.
+=attr sig1
+
+=attr sig2
+
+=attr version
+
+=attr headers
+
+=attr inner_headers
+
+=attr meta
+
+=attr binaries
+
+=attr deleted_objects
+
+=attr raw
+
+ $value = $kdbx->$attr;
+ $kdbx->$attr($value);
+
+Get and set attributes.
+
=cut
use Crypt::Digest qw(digest_data);
use File::KDBX::Constants qw(:cipher :random_stream);
use File::KDBX::Error;
+use Scalar::Util qw(blessed);
use Module::Load;
use namespace::clean;
return $self;
}
+=method crypt
+
+ $ciphertext = $cipher->crypt($plaintext);
+ $plaintext = $cipher->crypt($ciphertext);
+
+Encrypt or decrypt some data. These ciphers are symmetric, so encryption and decryption are the same
+operation. This method is an alias for both L<File::KDBX::Cipher/encrypt> and L<File::KDBX::Cipher/decrypt>.
+
+=cut
+
sub crypt {
my $self = shift;
my $stream = $self->_stream;
return join('', map { $stream->crypt(ref $_ ? $$_ : $_) } grep { defined } @_);
}
+=method keystream
+
+ $stream = $cipher->keystream;
+
+Access the keystream.
+
+=cut
+
sub keystream {
my $self = shift;
return $self->_stream->keystream(@_);
}
+=method dup
+
+ $cipher_copy = $cipher->dup(%attributes);
+
+Get a copy of an existing cipher with the counter reset, optionally applying new attributes.
+
+=cut
+
sub dup {
- my $self = shift;
- my $dup = File::KDBX::Cipher->new(
- stream_id => $self->stream_id,
- key => $self->key,
- @_,
- );
- $dup->{key} = $self->key;
- $dup->{iv} = $self->iv;
- # FIXME - probably turn this into a proper clone method
+ my $self = shift;
+ my $class = blessed($self);
+
+ # Shallow-copy all attributes, letting caller-supplied pairs override them.
+ my $dup = bless {%$self, @_}, $class;
+ delete $dup->{stream};   # drop the live stream so the copy starts a fresh keystream
return $dup;
}
sub finish { delete $_[0]->{stream}; '' }
-sub counter { $_[0]->{counter} // 0 }
-sub offset { $_[0]->{offset} }
-
=attr algorithm
+ $algorithm = $cipher->algorithm;
+
Get the stream cipher algorithm. Can be one of C<Salsa20> and C<ChaCha>.
+=attr counter
+
+ $counter = $cipher->counter;
+
+Get the initial counter / block count into the keystream.
+
+=attr offset
+
+ $offset = $cipher->offset;
+
+Get the initial byte offset into the keystream. This has precedence over L</counter> if both are set.
+
=cut
sub algorithm { $_[0]->{algorithm} or throw 'Stream cipher algorithm is not set' }
+sub counter { $_[0]->{counter} // 0 }
+sub offset { $_[0]->{offset} }
sub key_size { { Salsa20 => 32, ChaCha => 32 }->{$_[0]->{algorithm} || ''} // 0 }
sub iv_size { { Salsa20 => 8, ChaCha => 12 }->{$_[0]->{algorithm} || ''} // -1 }
sub block_size { 1 }
our %EXPORT_TAGS;
push @{$EXPORT_TAGS{header}}, 'KDBX_HEADER';
push @{$EXPORT_TAGS{inner_header}}, 'KDBX_INNER_HEADER';
+push @{$EXPORT_TAGS{icon}}, 'icon';
$EXPORT_TAGS{all} = [map { @$_ } values %EXPORT_TAGS];
our @EXPORT_OK = sort @{$EXPORT_TAGS{all}};
}
sub KDBX_INNER_HEADER { $INNER_HEADER{$_[0]} }
+# Index every known icon constant by both its string name and its numeric value,
+# so icon() can normalize either form back to the constant.
+my %ICON;
+for my $icon (
+ ICON_PASSWORD, ICON_PACKAGE_NETWORK, ICON_MESSAGEBOX_WARNING, ICON_SERVER, ICON_KLIPPER,
+ ICON_EDU_LANGUAGES, ICON_KCMDF, ICON_KATE, ICON_SOCKET, ICON_IDENTITY, ICON_KONTACT, ICON_CAMERA,
+ ICON_IRKICKFLASH, ICON_KGPG_KEY3, ICON_LAPTOP_POWER, ICON_SCANNER, ICON_MOZILLA_FIREBIRD,
+ ICON_CDROM_UNMOUNT, ICON_DISPLAY, ICON_MAIL_GENERIC, ICON_MISC, ICON_KORGANIZER, ICON_ASCII, ICON_ICONS,
+ ICON_CONNECT_ESTABLISHED, ICON_FOLDER_MAIL, ICON_FILESAVE, ICON_NFS_UNMOUNT, ICON_MESSAGE, ICON_KGPG_TERM,
+ ICON_KONSOLE, ICON_FILEPRINT, ICON_FSVIEW, ICON_RUN, ICON_CONFIGURE, ICON_KRFB, ICON_ARK,
+ ICON_KPERCENTAGE, ICON_SAMBA_UNMOUNT, ICON_HISTORY, ICON_MAIL_FIND, ICON_VECTORGFX, ICON_KCMMEMORY,
+ ICON_TRASHCAN_FULL, ICON_KNOTES, ICON_CANCEL, ICON_HELP, ICON_KPACKAGE, ICON_FOLDER,
+ ICON_FOLDER_BLUE_OPEN, ICON_FOLDER_TAR, ICON_DECRYPTED, ICON_ENCRYPTED, ICON_APPLY, ICON_SIGNATURE,
+ ICON_THUMBNAIL, ICON_KADDRESSBOOK, ICON_VIEW_TEXT, ICON_KGPG, ICON_PACKAGE_DEVELOPMENT, ICON_KFM_HOME,
+ ICON_SERVICES, ICON_TUX, ICON_FEATHER, ICON_APPLE, ICON_W, ICON_MONEY, ICON_CERTIFICATE, ICON_SMARTPHONE,
+) {
+ $ICON{$icon} = $ICON{0+$icon} = $icon;   # key by string form and by numeric form
+}
+# Normalize an icon name or number to its constant; unknown values fall back to ICON_PASSWORD.
+sub icon { $ICON{$_[0] // ''} // ICON_PASSWORD }
+
1;
__END__
=attr format
+Get the file format used for writing the database. Normally the format is auto-detected from the database,
+which is the safest choice. Possible formats:
+
+=for :list
+* C<V3>
+* C<V4>
+* C<KDB>
+* C<XML> (only used if explicitly set)
+* C<Raw> (only used if explicitly set)
+
+B<WARNING:> There is a potential for data loss if you explicitly use a format that doesn't support the
+features used by the KDBX database being written.
+
+The most common reason to explicitly specify the file format is to save a database as an unencrypted XML file:
+
+ $kdbx->dump_file('database.xml', format => 'XML');
+
=cut
sub format { $_[0]->{format} }
+
+=attr inner_format
+
+Get the format of the data inside the KDBX envelope. This only applies to C<V3> and C<V4> formats. Possible
+formats:
+
+=for :list
+* C<XML> - Write the database groups and entries as XML (default)
+* C<Raw> - Write L<File::KDBX/raw> instead of the actual database contents
+
+=cut
+
sub inner_format { $_[0]->{inner_format} // 'XML' }
=attr min_version
sub min_version { KDBX_VERSION_OLDEST }
-sub upgrade { $_[0]->{upgrade} // 1 }
+=attr allow_upgrade
+
+ $bool = $dumper->allow_upgrade;
+
+Whether or not to allow implicitly upgrading a database to a newer version. When enabled, in order to avoid
+potential data loss, the database can be upgraded as-needed in cases where the database file format version is
+too low to support new features being used.
+
+The default is to allow upgrading.
+
+=cut
+
+sub allow_upgrade { $_[0]->{allow_upgrade} // 1 }
+
+=attr randomize_seeds
+
+ $bool = $dumper->randomize_seeds;
+
+Whether or not to randomize seeds in a database before writing. The default is to randomize seeds, and there's
+not often a good reason not to do so. If disabled, the seeds associated with the KDBX database will be used as
+they are.
+
+=cut
sub randomize_seeds { $_[0]->{randomize_seeds} // 1 }
my $kdbx = $self->kdbx;
my $min_version = $kdbx->minimum_version;
- if ($kdbx->version < $min_version && $self->upgrade) {
+ if ($kdbx->version < $min_version && $self->allow_upgrade) {
alert sprintf("Implicitly upgrading database from %x to %x\n", $kdbx->version, $min_version),
version => $kdbx->version, min_version => $min_version;
$kdbx->version($min_version);
use File::KDBX::Util qw(assert_64bit erase_scoped gzip snakify);
use IO::Handle;
use Scalar::Util qw(isdual looks_like_number);
-use Scope::Guard;
use Time::Piece;
use XML::LibXML;
use boolean;
our $VERSION = '999.999'; # VERSION
-sub protect {
+=attr allow_protection
+
+ $bool = $dumper->allow_protection;
+
+Get whether or not protected strings and binaries should be written in an encrypted stream. Default: C<TRUE>
+
+=cut
+
+sub allow_protection {
my $self = shift;
- $self->{protect} = shift if @_;
- $self->{protect} //= 1;
+ $self->{allow_protection} = shift if @_;
+ $self->{allow_protection} //= 1;
}
+=attr binaries
+
+ $bool = $dumper->binaries;
+
+Get whether or not binaries within the database should be written. Default: C<TRUE>
+
+=cut
+
sub binaries {
my $self = shift;
$self->{binaries} = shift if @_;
$self->{binaries} //= $self->kdbx->version < KDBX_VERSION_4_0;
}
+=attr compress_binaries
+
+ $tristate = $dumper->compress_binaries;
+
+Get whether or not to compress binaries. Possible values:
+
+=for :list
+* C<TRUE> - Always compress binaries
+* C<FALSE> - Never compress binaries
+* C<undef> - Compress binaries if it results in smaller database sizes (default)
+
+=cut
+
sub compress_binaries {
my $self = shift;
$self->{compress_binaries} = shift if @_;
$self->{compress_binaries};
}
+=attr compress_datetimes
+
+ $bool = $dumper->compress_datetimes;
+
+Get whether or not to write compressed datetimes. Datetimes are traditionally written in the human-readable
+string format of C<1970-01-01T00:00:00Z>, but they can also be written in a compressed form to save some
+bytes. The default is to write compressed datetimes if the KDBX file version is 4+, otherwise use the
+human-readable format.
+
+=cut
+
sub compress_datetimes {
my $self = shift;
$self->{compress_datetimes} = shift if @_;
$self->{compress_datetimes};
}
+=attr header_hash
+
+ $octets = $dumper->header_hash;
+
+Get the value to be written as the B<HeaderHash> in the B<Meta> section. This is the way KDBX3 files validate
+the authenticity of header data. This is unnecessary and should not be used with KDBX4 files because that
+format uses HMAC-SHA256 to detect tampering.
+
+L<File::KDBX::Dumper::V3> automatically calculates the header hash and provides it to this module, and plain
+XML files which don't have a KDBX wrapper don't have headers and so should not have a header hash. Therefore
+there is probably never any reason to set this manually.
+
+=cut
+
sub header_hash { $_[0]->{header_hash} }
sub _binaries_written { $_[0]->{_binaries_written} //= {} }
$value = \$encoded;
}
- my $always_compress = $self->compress_binaries;
- my $try_compress = $always_compress || !defined $always_compress;
+ my $should_compress = $self->compress_binaries;
+ my $try_compress = $should_compress || !defined $should_compress;
my $compressed;
if ($try_compress) {
$compressed = gzip($$value);
push @cleanup, erase_scoped $compressed;
- if ($always_compress || length($compressed) < length($$value)) {
+ if ($should_compress || length($compressed) < length($$value)) {
$value = \$compressed;
$node->setAttribute('Compressed', _encode_bool(true));
}
my $node = shift;
my $kdbx = $self->kdbx;
- my $is_locked = $kdbx->is_locked;
- my $guard = Scope::Guard->new(sub { $kdbx->lock if $is_locked });
- $kdbx->unlock;
+ my $guard = $kdbx->unlock_scoped;
- if (my $group = $kdbx->{root}) {
+ if (my $group = $kdbx->root) {
my $group_node = $node->addNewChild(undef, 'Group');
- $self->_write_xml_group($group_node, $group);
+ $self->_write_xml_group($group_node, $group->_confirmed);
}
undef $guard; # re-lock if needed, as early as possible
) : (),
);
- for my $entry (@{$group->{entries} || []}) {
+ for my $entry (@{$group->entries}) {
my $entry_node = $node->addNewChild(undef, 'Entry');
- $self->_write_xml_entry($entry_node, $entry);
+ $self->_write_xml_entry($entry_node, $entry->_confirmed);
}
- for my $group (@{$group->{groups} || []}) {
+ for my $group (@{$group->groups}) {
my $group_node = $node->addNewChild(undef, 'Group');
- $self->_write_xml_group($group_node, $group);
+ $self->_write_xml_group($group_node, $group->_confirmed);
}
}
);
if (!$in_history) {
- if (my @history = @{$entry->{history} || []}) {
+ if (my @history = @{$entry->history}) {
my $history_node = $node->addNewChild(undef, 'History');
for my $historical (@history) {
my $historical_node = $history_node->addNewChild(undef, 'Entry');
- $self->_write_xml_entry($historical_node, $historical, 1);
+ $self->_write_xml_entry($historical_node, $historical->_confirmed, 1);
}
}
}
my $protect = $string->{protect} || $memory_protection->{$memprot_key};
if ($protect) {
- if ($self->protect) {
+ if ($self->allow_protection) {
my $encoded;
if (utf8::is_utf8($value)) {
$encoded = encode('UTF-8', $value);
use File::KDBX::Error;
use File::KDBX::Util qw(:function :uri generate_uuid load_optional);
use Hash::Util::FieldHash;
-use List::Util qw(sum0);
-use Ref::Util qw(is_plain_hashref);
+use List::Util qw(first sum0);
+use Ref::Util qw(is_coderef is_plain_hashref);
use Scalar::Util qw(looks_like_number);
use Storable qw(dclone);
use Time::Piece;
for my $entry (@{$self->history}) {
$entry->{uuid} = $uuid;
}
- # if (defined $old_uuid and my $kdbx = $KDBX{$self}) {
- # $kdbx->_update_entry_uuid($old_uuid, $uuid, $self);
- # }
+ $self->_signal('uuid.changed', $uuid, $old_uuid) if defined $old_uuid && $self->is_current;
}
$self->{uuid};
}
my @ATTRS = qw(uuid custom_data history);
my %ATTRS = (
# uuid => sub { generate_uuid(printable => 1) },
- icon_id => ICON_PASSWORD,
+ icon_id => sub { defined $_[1] ? icon($_[1]) : ICON_PASSWORD },
custom_icon_uuid => undef,
foreground_color => '',
background_color => '',
# history => sub { +[] },
);
my %ATTRS_TIMES = (
- last_modification_time => sub { gmtime },
- creation_time => sub { gmtime },
- last_access_time => sub { gmtime },
- expiry_time => sub { gmtime },
+ last_modification_time => sub { scalar gmtime },
+ creation_time => sub { scalar gmtime },
+ last_access_time => sub { scalar gmtime },
+ expiry_time => sub { scalar gmtime },
expires => false,
usage_count => 0,
- location_changed => sub { gmtime },
+ location_changed => sub { scalar gmtime },
);
my %ATTRS_STRINGS = (
title => 'Title',
notes => 'Notes',
);
-while (my ($attr, $default) = each %ATTRS) {
+while (my ($attr, $setter) = each %ATTRS) {
no strict 'refs'; ## no critic (ProhibitNoStrict)
- *{$attr} = sub {
+ *{$attr} = is_coderef $setter ? sub {
+ my $self = shift;
+ $self->{$attr} = $setter->($self, shift) if @_;
+ $self->{$attr} //= $setter->($self);
+ } : sub {
my $self = shift;
$self->{$attr} = shift if @_;
- $self->{$attr} //= (ref $default eq 'CODE') ? $default->($self) : $default;
+ $self->{$attr} //= $setter;
};
}
while (my ($attr, $default) = each %ATTRS_TIMES) {
}
}
-sub add_history {
+=method add_historical_entry
+
+ $entry->add_historical_entry($entry);
+
+Add an entry to the history.
+
+=cut
+
+# Append entries to this entry's history list. Any history the added entries
+# themselves carry is discarded first -- history is not nested.
+sub add_historical_entry {
my $self = shift;
delete $_->{history} for @_;
push @{$self->{history} //= []}, map { $self->_wrap_entry($_) } @_;
}
+=method current_entry
+
+ $current_entry = $entry->current_entry;
+
+Get an entry's current entry. If the entry itself is current (not historical), itself is returned.
+
+=cut
+
+sub current_entry {
+ my $self = shift;
+ my $group = $self->parent;
+
+ if ($group) {
+ # The current version is the entry with the same UUID found directly in
+ # the parent group's entry list.
+ my $id = $self->uuid;
+ my $entry = first { $id eq $_->uuid } @{$group->entries};
+ return $entry if $entry;
+ }
+
+ return $self;   # no parent (or not found in parent): treat self as current
+}
+
+=method is_current
+
+ $bool = $entry->is_current;
+
+Get whether or not an entry is considered current (i.e. not historical). An entry is current if it is directly
+in the parent group's entry list.
+
+=cut
+
+sub is_current {
+ my $self = shift;
+ my $current = $self->current_entry;
+ # Compare object identity, not UUID: historical copies share the same UUID.
+ return Hash::Util::FieldHash::id($self) == Hash::Util::FieldHash::id($current);
+}
+
+=method is_historical
+
+ $bool = $entry->is_historical;
+
+Get whether or not an entry is considered historical (i.e. not current).
+
+This is just the inverse of L</is_current>.
+
+=cut
+
+sub is_historical { !$_[0]->is_current }
+
##############################################################################
-sub begin_work {
+sub _signal {
my $self = shift;
- require File::KDBX::Transaction;
- return File::KDBX::Transaction->new($self, @_);
+ my $type = shift;
+ return $self->SUPER::_signal("entry.$type", @_);
}
sub _commit {
my $self = shift;
- my $txn = shift;
- $self->add_history($txn->original);
- $self->last_modification_time(gmtime);
+ my $orig = shift;
+ $self->add_historical_entry($orig);
+ my $time = gmtime;
+ $self->last_modification_time($time);
+ $self->last_access_time($time);
}
sub label { shift->expanded_title(@_) }
a UUID. An entry can also have an icon associated with it, and there are various timestamps. Take a look at
the attributes to see what's available.
+A B<File::KDBX::Entry> is a subclass of L<File::KDBX::Object>.
+
=head2 Placeholders
Entry string and auto-type key sequences can have placeholders or template tags that can be replaced by other
use File::KDBX::Util qw(generate_uuid);
use Hash::Util::FieldHash;
use List::Util qw(sum0);
-use Ref::Util qw(is_ref);
+use Ref::Util qw(is_coderef is_ref);
use Scalar::Util qw(blessed);
use Time::Piece;
use boolean;
name => '',
notes => '',
tags => '',
- icon_id => ICON_FOLDER,
+ icon_id => sub { defined $_[1] ? icon($_[1]) : ICON_FOLDER },
custom_icon_uuid => undef,
is_expanded => false,
default_auto_type_sequence => '',
# groups => sub { +[] },
);
my %ATTRS_TIMES = (
- last_modification_time => sub { gmtime },
- creation_time => sub { gmtime },
- last_access_time => sub { gmtime },
- expiry_time => sub { gmtime },
+ last_modification_time => sub { scalar gmtime },
+ creation_time => sub { scalar gmtime },
+ last_access_time => sub { scalar gmtime },
+ expiry_time => sub { scalar gmtime },
expires => false,
usage_count => 0,
- location_changed => sub { gmtime },
+ location_changed => sub { scalar gmtime },
);
-while (my ($attr, $default) = each %ATTRS) {
+while (my ($attr, $setter) = each %ATTRS) {
no strict 'refs'; ## no critic (ProhibitNoStrict)
- *{$attr} = sub {
+ *{$attr} = is_coderef $setter ? sub {
+ my $self = shift;
+ $self->{$attr} = $setter->($self, shift) if @_;
+ $self->{$attr} //= $setter->($self);
+ } : sub {
my $self = shift;
$self->{$attr} = shift if @_;
- $self->{$attr} //= (ref $default eq 'CODE') ? $default->($self) : $default;
+ $self->{$attr} //= $setter;
};
}
while (my ($attr, $default) = each %ATTRS_TIMES) {
my %args = @_ % 2 == 1 ? (uuid => shift, @_) : @_;
my $old_uuid = $self->{uuid};
my $uuid = $self->{uuid} = delete $args{uuid} // generate_uuid;
- # if (defined $old_uuid and my $kdbx = $KDBX{$self}) {
- # $kdbx->_update_group_uuid($old_uuid, $uuid, $self);
- # }
+ $self->_signal('uuid.changed', $uuid, $old_uuid) if defined $old_uuid;
}
$self->{uuid};
}
sub label { shift->name(@_) }
+sub _signal {
+ my $self = shift;
+ my $type = shift;
+ return $self->SUPER::_signal("group.$type", @_);
+}
+
+sub _commit {
+ my $self = shift;
+ my $time = gmtime;
+ $self->last_modification_time($time);
+ $self->last_access_time($time);
+}
+
1;
__END__
=attr format
-TODO
+Get the file format used for reading the database. Normally the format is auto-detected from the data stream.
+This auto-detection works well, so there's not really a good reason to explicitly specify the format.
+Possible formats:
+
+=for :list
+* C<V3>
+* C<V4>
+* C<KDB>
+* C<XML>
+* C<Raw>
=cut
sub format { $_[0]->{format} }
+
+=attr inner_format
+
+Get the format of the data inside the KDBX envelope. This only applies to C<V3> and C<V4> formats. Possible
+formats:
+
+=for :list
+* C<XML> - Read the database groups and entries as XML (default)
+* C<Raw> - Read the data without parsing and store it in L<File::KDBX/raw>
+
+=cut
+
sub inner_format { $_[0]->{inner_format} // 'XML' }
=attr min_version
}
1;
+__END__
+
+=head1 DESCRIPTION
+
+
+=cut
my $self = shift;
my $fh = shift;
- # print do { local $/; <$fh> };
- # exit;
my $reader = $self->{_reader} = XML::LibXML::Reader->new(IO => $fh);
delete $self->{safe};
use File::KDBX::Error;
use File::KDBX::Util qw(:uuid);
use Hash::Util::FieldHash qw(fieldhashes);
-use Ref::Util qw(is_arrayref is_plain_hashref is_ref);
+use List::Util qw(first);
+use Ref::Util qw(is_arrayref is_plain_arrayref is_plain_hashref is_ref);
use Scalar::Util qw(blessed weaken);
use namespace::clean;
our $VERSION = '999.999'; # VERSION
-fieldhashes \my (%KDBX, %PARENT);
+fieldhashes \my (%KDBX, %PARENT, %TXNS, %REFS, %SIGNALS);
=method new
local $CLONE{history} = 1;
local $CLONE{reference_password} = 0;
local $CLONE{reference_username} = 0;
+ # Clone only the entry's data and manually bless to avoid infinite recursion.
bless Storable::dclone({%$copy}), 'File::KDBX::Entry';
};
- my $txn = $self->begin_work($clone_obj);
+ my $txn = $self->begin_work(snapshot => $clone_obj);
if ($CLONE{reference_password}) {
$self->password("{REF:P\@I:$uuid}");
}
}
$self->uuid(generate_uuid) if $CLONE{new_uuid};
}
+
+ # Dualvars aren't cloned as dualvars, so dualify the icon.
+ $self->icon_id($self->{icon_id}) if defined $self->{icon_id};
}
=attr kdbx
sub group {
my $self = shift;
- my $addr = Hash::Util::FieldHash::id($self);
+ my $id = Hash::Util::FieldHash::id($self);
if (my $group = $PARENT{$self}) {
my $method = $self->_parent_container;
- for my $object (@{$group->$method}) {
- return $group if $addr == Hash::Util::FieldHash::id($object);
- }
+ return $group if first { $id == Hash::Util::FieldHash::id($_) } @{$group->$method};
delete $PARENT{$self};
}
# always get lineage from root to leaf because the other way requires parent, so it would be recursive
return $data->{value};
}
+##############################################################################
+
+# Raise a signal of the given type. Inside a transaction the signal is only
+# queued on the innermost frame; it is delivered (de-duplicated) when the
+# outermost transaction commits, or dropped on rollback, per the begin_work
+# contract documented below. Outside a transaction it is delivered at once.
+sub _signal {
+    my $self = shift;
+    my $type = shift;
+
+    if ($self->_in_txn) {
+        my $stack = $self->_signal_stack;
+        my $queue = $stack->[-1];
+        push @$queue, [$type, @_];
+    }
+
+    # Deliver immediately ONLY when no transaction is pending; otherwise the
+    # queued copy above is the single source of truth. (Sending here
+    # unconditionally would double-deliver on commit and leak on rollback.)
+    $self->_signal_send([[$type, @_]]) if !$self->_in_txn;
+}
+
+# Per-object stack of signal queues, one frame per open transaction
+# (lazily created; innermost frame last).
+sub _signal_stack { $SIGNALS{$_[0]} //= [] }
+
+# Open a fresh, empty signal queue for a newly begun transaction frame.
+sub _signal_begin_work {
+    my $self = shift;
+    push @{$self->_signal_stack}, [];
+}
+
+# Fold the committed frame's queued signals into the enclosing frame (or
+# into a fresh list when this was the outermost frame) and return the
+# merged queue for the caller to deliver once no transaction remains open.
+sub _signal_commit {
+    my $self = shift;
+    my $signals = pop @{$self->_signal_stack};
+    my $previous = $self->_signal_stack->[-1] // [];
+    push @$previous, @$signals;
+    return $previous;
+}
+
+# Discard the rolled-back frame's queued signals entirely.
+sub _signal_rollback {
+    my $self = shift;
+    pop @{$self->_signal_stack};
+}
+
+# Deliver a batch of signals to the associated KDBX database, if any
+# (a disassociated object has no $KDBX entry, so signals go nowhere).
+# Duplicate signal types are collapsed, keeping only the MOST RECENT signal
+# of each type; the double reverse preserves the relative order of the
+# survivors.
+sub _signal_send {
+    my $self = shift;
+    my $signals = shift // [];
+
+    my $kdbx = $KDBX{$self} or return;
+
+    # de-duplicate, keeping the most recent signal for each type
+    my %seen;
+    my @signals = grep { !$seen{$_->[0]}++ } reverse @$signals;
+
+    for my $sig (reverse @signals) {
+        $kdbx->_handle_signal($self, @$sig);
+    }
+}
+
+##############################################################################
+
+=method begin_work
+
+ $txn = $object->begin_work(%options);
+ $object->begin_work(%options);
+
+Begin a new transaction. Returns a L<File::KDBX::Transaction> object that can be scoped to ensure a rollback
+occurs if exceptions are thrown. Alternatively, if called in void context, there will be no
+B<File::KDBX::Transaction> and it is instead your responsibility to call L</commit> or L</rollback> as
+appropriate. It is undefined behavior to call these if a B<File::KDBX::Transaction> exists. Recursive
+transactions are allowed.
+
+Signals created during a transaction are delayed until all transactions are resolved. If the outermost
+transaction is committed, then the signals are de-duplicated and delivered. Otherwise the signals are dropped.
+This means that the KDBX database will not fix broken references or mark itself dirty until after the
+transaction is committed.
+
+How it works: With the beginning of a transaction, a snapshot of the object is created. In the event of
+a rollback, the object's data is replaced with data from the snapshot.
+
+By default, the snapshot is shallow (i.e. does not include subgroups, entries or historical entries). This
+means that only modifications to the object itself (its data, fields, strings, etc.) are atomic; modifications
+to subgroups etc., including adding or removing items, are auto-committed instantly and will persist regardless
+of the result of the pending transaction. You can override this for groups, entries and history independently
+using options:
+
+=for :list
+* C<entries> - If set, snapshot entries within a group, deeply (default: false)
+* C<groups> - If set, snapshot subgroups within a group, deeply (default: false)
+* C<history> - If set, snapshot historical entries within an entry (default: false)
+
+For example, if you begin a transaction on a group object using the C<entries> option, like this:
+
+ $group->begin_work(entries => 1);
+
+Then if you modify any of the group's entries OR add new entries OR delete entries, all of that will be undone
+if the transaction is rolled back. With a default-configured transaction, however, changes to entries are kept
+even if the transaction is rolled back.
+
+=cut
+
+sub begin_work {
+    my $self = shift;
+
+    # In non-void context, return a guard object; its constructor re-enters
+    # begin_work in void context, and its destructor rolls back unless the
+    # caller commits first.
+    if (defined wantarray) {
+        require File::KDBX::Transaction;
+        return File::KDBX::Transaction->new($self, @_);
+    }
+
+    my %args = @_;
+    # Snapshot the object so rollback can restore it. A caller may supply a
+    # ready-made snapshot via the "snapshot" option (used by clone, above).
+    my $orig = $args{snapshot} // do {
+        my $c = $self->clone(
+            entries => $args{entries} // 0,
+            groups => $args{groups} // 0,
+            history => $args{history} // 0,
+        );
+        # For parts NOT snapshotted deeply, alias the live structures into
+        # the snapshot so a rollback leaves them (and changes to them) alone.
+        $c->{entries} = $self->{entries} if !$args{entries};
+        $c->{groups} = $self->{groups} if !$args{groups};
+        $c->{history} = $self->{history} if !$args{history};
+        $c;
+    };
+
+    # Record which live ref corresponds to each snapshot ref, so rollback can
+    # restore data IN PLACE and outside holders of those refs stay valid.
+    my $id = Hash::Util::FieldHash::id($orig);
+    _save_references($id, $self, $orig);
+
+    $self->_signal_begin_work;
+
+    push @{$self->_txns}, $orig;
+}
+
+=method commit
+
+ $object->commit;
+
+Commit a transaction, making updates to C<$object> permanent. Returns itself to allow method chaining.
+
+=cut
+
+sub commit {
+    my $self = shift;
+    # No-op when no transaction is open on this object.
+    my $orig = pop @{$self->_txns} or return $self;
+    # Let the subclass finalize the commit given the pre-txn snapshot
+    # (e.g. File::KDBX::Group's _commit stamps modification/access times).
+    $self->_commit($orig);
+    # Merge this frame's queued signals into the enclosing frame, delivering
+    # them only once the outermost transaction has committed.
+    my $signals = $self->_signal_commit;
+    $self->_signal_send($signals) if !$self->_in_txn;
+    return $self;
+}
+
+# Subclasses must override to finalize a commit (receives the snapshot).
+sub _commit { die 'Not implemented' }
+# True while at least one transaction is open on this object.
+sub _in_txn { scalar @{$_[0]->_txns} }
+# Per-object stack of pre-transaction snapshots (innermost last).
+sub _txns { $TXNS{$_[0]} //= [] }
+
+=method rollback
+
+ $object->rollback;
+
+Roll back the most recent transaction, throwing away any updates to the object made since the transaction
+began. Returns itself to allow method chaining.
+
+=cut
+
+sub rollback {
+    my $self = shift;
+
+    # No-op when no transaction is open on this object.
+    my $orig = pop @{$self->_txns} or return $self;
+
+    # Copy the snapshot's data back into the live refs recorded at
+    # begin_work time, preserving reference identity for outside holders.
+    my $id = Hash::Util::FieldHash::id($orig);
+    _restore_references($id, $orig);
+
+    # Drop any signals queued during the aborted transaction.
+    $self->_signal_rollback;
+
+    return $self;
+}
+
+# Walk the snapshot ($orig) and the live object ($self) as parallel trees,
+# recording under transaction id $id a map from each snapshot ref to its
+# corresponding live ref. _restore_references uses this map on rollback to
+# copy data back into the ORIGINAL containers rather than swapping refs.
+# Only plain arrays, plain hashes, and objects of this class are descended.
+sub _save_references {
+    my $id = shift;
+    my $self = shift;
+    my $orig = shift;
+
+    if (is_plain_arrayref($orig)) {
+        for (my $i = 0; $i < @$orig; ++$i) {
+            _save_references($id, $self->[$i], $orig->[$i]);
+        }
+        $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self;
+    }
+    elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) {
+        for my $key (keys %$orig) {
+            _save_references($id, $self->{$key}, $orig->{$key});
+        }
+        $REFS{$id}{Hash::Util::FieldHash::id($orig)} = $self;
+    }
+}
+
+# Inverse of _save_references: for each snapshot ref recorded under
+# transaction id $id, copy the snapshot's contents back into the matching
+# live container in place. Values with no recorded live counterpart (plain
+# scalars, refs that were aliased rather than cloned) are returned as-is.
+sub _restore_references {
+    my $id = shift;
+    my $orig = shift // return;
+    my $self = delete $REFS{$id}{Hash::Util::FieldHash::id($orig) // ''} // return $orig;
+
+    if (is_plain_arrayref($orig)) {
+        @$self = map { _restore_references($id, $_) } @$orig;
+    }
+    elsif (is_plain_hashref($orig) || (blessed $orig && $orig->isa(__PACKAGE__))) {
+        for my $key (keys %$orig) {
+            # next if is_ref($orig->{$key}) &&
+            #     (Hash::Util::FieldHash::id($self->{$key}) // 0) == Hash::Util::FieldHash::id($orig->{$key});
+            $self->{$key} = _restore_references($id, $orig->{$key});
+        }
+    }
+
+    return $self;
+}
+
+# Return the object's last confirmed (committed) state: the OUTERMOST
+# pre-transaction snapshot while any transaction is open, otherwise the
+# object itself — e.g. so dumping mid-transaction sees only committed data.
+sub _confirmed {
+    my $self = shift;
+    my ($orig) = @{$self->_txns};
+    return $orig // $self;
+}
+
+##############################################################################
+
sub _wrap_group {
my $self = shift;
my $group = shift;
There is some functionality shared by both types of objects, and that's what this class provides.
+Each object can be associated with a L<File::KDBX> database or be disassociated. A disassociated object will
+not be persisted when dumping a database. It is also possible for an object to be associated with a database
+but not be part of the object tree (i.e. is not the root group or any subgroup or entry). A disassociated
+object or an object not part of the object tree of a database can be added to a database using one of:
+
+=for :list
+* L<File::KDBX/add_entry>
+* L<File::KDBX/add_group>
+* L<File::KDBX::Group/add_entry>
+* L<File::KDBX::Group/add_group>
+* L<File::KDBX::Entry/add_historical_entry>
+
+It is possible to copy or move objects between databases, but you B<MUST NOT> include the same object in more
+than one database at once or there could be some strange aliasing effects (i.e. changes in one database might
+affect another in unexpected ways). This could lead to difficult-to-debug problems. It is similarly not safe
+or valid to add the same object multiple times to the same database. For example:
+
+    my $entry = File::KDBX::Entry->new(title => 'Whatever');
+
+ # DO NOT DO THIS:
+ $kdbx->add_entry($entry);
+ $another_kdbx->add_entry($entry);
+
+ # DO NOT DO THIS:
+ $kdbx->add_entry($entry);
+ $kdbx->add_entry($entry); # again
+
+Instead, do this:
+
+ # Copy an entry to multiple databases:
+ $kdbx->add_entry($entry);
+ $another_kdbx->add_entry($entry->clone);
+
+ # OR move an existing entry from one database to another:
+ $kdbx->add_entry($entry->remove);
+
=cut
our $VERSION = '999.999'; # VERSION
+=method new
+
+ $txn = File::KDBX::Transaction->new($object);
+
+Construct a new database transaction for editing an object atomically.
+
+=cut
+
sub new {
-    my $class = shift;
-    my $object = shift;
-    my $orig = shift // $object->clone;
-    return bless {object => $object, original => $orig}, $class;
+    my $class = shift;
+    my $object = shift;
+    # Open a void-context transaction on the object itself; this guard's
+    # only job is to commit it, or roll it back on release (see DESTROY).
+    $object->begin_work(@_);
+    return bless {object => $object}, $class;
}
sub DESTROY { !in_global_destruction and $_[0]->rollback }
-sub object { $_[0]->{object} }
-sub original { $_[0]->{original} }
+=attr object
+
+Get the object being transacted on.
+
+=cut
+
+sub object { $_[0]->{object} }
+
+=method commit
+
+ $txn->commit;
+
+Commit the transaction, making updates to the L</object> permanent.
+
+=cut
sub commit {
    my $self = shift;
+    # A transaction may be resolved only once; later calls are no-ops.
+    return if $self->{done};
+
    my $obj = $self->object;
-    if (my $commit = $obj->can('_commit')) {
-        $commit->($obj, $self);
-    }
-    $self->{committed} = 1;
+    # Delegate to the object's own transaction stack (File::KDBX::Object).
+    $obj->commit;
+    $self->{done} = 1;
    return $obj;
}
+=method rollback
+
+ $txn->rollback;
+
+Roll back the transaction, throwing away any updates to the L</object> made since the transaction began. This
+happens automatically when the transaction is released, unless it has already been committed.
+
+=cut
+
sub rollback {
    my $self = shift;
+    # Already committed or rolled back: nothing to undo.
-    return if $self->{committed};
+    return if $self->{done};
    my $obj = $self->object;
-    my $orig = $self->original;
-
-    %$obj = ();
-    @$obj{keys %$orig} = values %$orig;
-
+    # Delegate to the object's own transaction stack (File::KDBX::Object).
+    $obj->rollback;
+    $self->{done} = 1;
    return $obj;
}
gzip => [qw(gzip gunzip)],
io => [qw(is_readable is_writable read_all)],
load => [qw(load_optional load_xs try_load_optional)],
- search => [qw(query search simple_expression_query)],
+ search => [qw(query search search_limited simple_expression_query)],
text => [qw(snakify trim)],
uuid => [qw(format_uuid generate_uuid is_uuid uuid)],
uri => [qw(split_url uri_escape_utf8 uri_unescape_utf8)],
=cut
sub erase_scoped {
+ throw 'Programmer error: Cannot call erase_scoped in void context' if !defined wantarray;
my @args;
for (@_) {
!is_ref($_) || is_arrayref($_) || is_hashref($_) || is_scalarref($_)
sub search {
    my $list = shift;
    my $query = shift;
-    # my %args = @_;
    if (is_coderef($query) && !@_) {
        # already a compiled query subroutine; use it as-is
-        $query = query($query, @_);
    }
+    elsif (is_scalarref($query)) {
+        # a scalar ref carries a simple-expression string (as in search_limited)
+        $query = simple_expression_query($$query, @_);
+    }
+    else {
+        # compile a declarative query structure into a query subroutine
+        $query = query($query, @_);
+    }
+    my @match;
+    for my $item (@$list) {
+        push @match, $item if $query->($item);
+    }
+    return \@match;
+}
+
+# Like search() but stop scanning once a maximum number of matches
+# (default 1) has been collected, so existence/first-match queries can
+# short-circuit instead of scanning the whole list.
+sub search_limited {
+    my $list = shift;
+    my $query = shift;
+    my $limit = shift // 1;
+
+    if (is_coderef($query) && !@_) {
+        # already a query
+    }
+    elsif (is_scalarref($query)) {
+        # a scalar ref carries a simple-expression string
+        $query = simple_expression_query($$query, @_);
+    }
+    else {
+        # compile a declarative query structure
+        $query = query($query, @_);
+    }
    my @match;
    for my $item (@$list) {
        push @match, $item if $query->($item);
-        # last if defined $limit && $limit <= @match;
+        last if $limit <= @match;
    }
    return \@match;
}
my $entry = File::KDBX::Entry->new(my $kdbx = File::KDBX->new, icon_id => 42);
is $entry->custom_icon_uuid, undef, 'UUID is undef if no custom icon is set';
is $entry->custom_icon, undef, 'Icon is undef if no custom icon is set';
- is $entry->icon_id, 42, 'Default icon is set to something';
+ is $entry->icon_id, 'KCMMemory', 'Default icon is set to something';
is $entry->custom_icon($gif), $gif, 'Setting a custom icon returns icon';
is $entry->custom_icon, $gif, 'Henceforth the icon is set';
- is $entry->icon_id, 0, 'Default icon got changed to first icon';
+ is $entry->icon_id, 'Password', 'Default icon got changed to first icon';
my $uuid = $entry->custom_icon_uuid;
isnt $uuid, undef, 'UUID is now set';
is $found, $gif, 'Custom icon still exists in the database';
};
+# Committing an entry transaction should save the entry's pre-change state
+# as a historical entry, while the live entry keeps the new values.
+subtest 'History' => sub {
+    my $kdbx = File::KDBX->new;
+    my $entry = $kdbx->add_entry(label => 'Foo');
+    is scalar @{$entry->history}, 0, 'New entry starts with no history';
+    is $entry->current_entry, $entry, 'Current new entry is itself';
+    ok $entry->is_current, 'New entry is current';
+
+    my $txn = $entry->begin_work;
+    $entry->notes('Hello!');
+    $txn->commit;
+    is scalar @{$entry->history}, 1, 'Committing creates a historical entry';
+    ok $entry->is_current, 'New entry is still current';
+    ok $entry->history->[0]->is_historical, 'Historical entry is not current';
+    is $entry->notes, 'Hello!', 'New entry is modified after commit';
+    is $entry->history->[0]->notes, '', 'Historical entry is saved without modification';
+};
+
+# Changing an entry's UUID should rewrite {REF:...} field references in
+# other entries (case-insensitively) so they keep resolving.
+subtest 'Update UUID' => sub {
+    my $kdbx = File::KDBX->new;
+
+    my $entry1 = $kdbx->add_entry(label => 'Foo');
+    my $entry2 = $kdbx->add_entry(label => 'Bar');
+
+    $entry2->url(sprintf('{REF:T@I:%s} {REF:T@I:%s}', $entry1->id, lc($entry1->id)));
+    is $entry2->expanded_url, 'Foo Foo', 'Field reference expands'
+        or diag explain $entry2->url;
+
+    $entry1->uuid("\1" x 16);
+
+    is $entry2->url, '{REF:T@I:01010101010101010101010101010101} {REF:T@I:01010101010101010101010101010101}',
+        'Replace field references when an entry UUID is changed';
+    is $entry2->expanded_url, 'Foo Foo', 'Field reference expands after UUID is changed'
+        or diag explain $entry2->url;
+};
+
done_testing;
for my $test (
['Custom icons' => $kdbx],
['Custom icons after dump & load roundtrip'
- => File::KDBX->load_string($kdbx->dump_string('a', upgrade => 0, randomize_seeds => 0), 'a')],
+ => File::KDBX->load_string($kdbx->dump_string('a', allow_upgrade => 0, randomize_seeds => 0), 'a')],
) {
my ($name, $kdbx) = @$test;
subtest $name, \&test_custom_icons, $kdbx;
$txn->commit;
$copy = $entry->clone;
- is @{$copy->history}, 1, 'Copy has a historical entry';
+ is @{$copy->history}, 1, 'Copy has a historical entry' or dumper $copy->history;
cmp_deeply $copy, $entry, 'Entry with history and its clone are identical';
$copy = $entry->clone(history => 0);
'First entry in group and its copy are different';
};
+# Exercise the object transaction machinery: explicit and implicit rollback,
+# shallow vs. deep (entries => 1) snapshots, nested transactions, reference
+# identity across rollback, and dumping mid-transaction.
+subtest 'Transactions' => sub {
+    my $kdbx = File::KDBX->new;
+
+    my $root = $kdbx->root;
+    my $entry = $kdbx->add_entry(
+        label => 'One',
+        last_modification_time => Time::Piece->strptime('2022-04-20', '%Y-%m-%d'),
+        username => 'Fred',
+    );
+
+    my $txn = $root->begin_work;
+    $root->label('Toor');
+    $root->notes('');
+    $txn->commit;
+    is $root->label, 'Toor', 'Retain change to root label after commit';
+
+    # Shallow (default) transaction: entry changes are NOT rolled back.
+    $root->begin_work;
+    $root->label('Root');
+    $entry->label('Zap');
+    $root->rollback;
+    is $root->label, 'Toor', 'Undo change to root label after rollback';
+    is $entry->label, 'Zap', 'Retain change to entry after rollback';
+
+    # Deep transaction (entries => 1): entry changes ARE rolled back.
+    $txn = $root->begin_work(entries => 1);
+    $root->label('Root');
+    $entry->label('Zippy');
+    undef $txn; # implicit rollback
+    is $root->label, 'Toor', 'Undo change to root label after implicit rollback';
+    is $entry->label, 'Zap', 'Undo change to entry after rollback with deep transaction';
+
+    $txn = $entry->begin_work;
+    my $mtime = $entry->last_modification_time;
+    my $username = $entry->string('UserName');
+    $username->{meh} = 'hi';
+    $entry->username('jinx');
+    $txn->rollback;
+    is $entry->string('UserName'), $username, 'Rollback keeps original references';
+    is $entry->last_modification_time, $mtime, 'No last modification time change after rollback';
+
+    $txn = $entry->begin_work;
+    $entry->username('jinx');
+    $txn->commit;
+    isnt $entry->last_modification_time, $mtime, 'Last modification time changes after commit';
+
+    # Nested transactions: only the outermost commit makes changes permanent.
+    {
+        my $txn1 = $root->begin_work;
+        $root->label('alien');
+        {
+            my $txn2 = $root->begin_work;
+            $root->label('truth');
+            $txn2->commit;
+        }
+    }
+    is $root->label, 'Toor', 'Changes thrown away after rolling back outer transaction';
+
+    {
+        my $txn1 = $root->begin_work;
+        $root->label('alien');
+        {
+            my $txn2 = $root->begin_work;
+            $root->label('truth');
+        }
+        $txn1->commit;
+    }
+    is $root->label, 'alien', 'Keep committed change after rolling back inner transaction';
+
+    {
+        my $txn1 = $root->begin_work;
+        $root->label('alien');
+        {
+            my $txn2 = $root->begin_work;
+            $root->label('truth');
+            $txn2->commit;
+        }
+        $txn1->commit;
+    }
+    is $root->label, 'truth', 'Keep committed change from inner transaction';
+
+    # Dumping while a transaction is open must see only committed state.
+    $txn = $root->begin_work;
+    $root->label('Lalala');
+    my $dump = $kdbx->dump_string('a');
+    $txn->commit;
+    is $root->label, 'Lalala', 'Keep committed label change after dump';
+    my $load = File::KDBX->load_string($dump, 'a');
+    is $load->root->label, 'truth', 'Object dumped before committing matches the pre-transaction state';
+};
+
done_testing;