/[cvs]/nfo/perl/libs/Data/Transfer/Sync.pm
ViewVC logotype

Diff of /nfo/perl/libs/Data/Transfer/Sync.pm

Parent Directory Parent Directory | Revision Log Revision Log | View Patch Patch

revision 1.12 by joko, Sun Jan 19 00:50:05 2003 UTC revision 1.13 by joko, Sun Jan 19 01:23:03 2003 UTC
# Line 6  Line 6 
6  ##  ##
7  ##    ----------------------------------------------------------------------------------------  ##    ----------------------------------------------------------------------------------------
8  ##    $Log$  ##    $Log$
9  ##    Revision 1.12  2003/01/19 00:50:05  joko  ##    Revision 1.13  2003/01/19 01:23:03  joko
10  ##    - moved to Data/Transfer/Sync/ - Core.pm, API.pm  ##    + new from Data/Transfer/Sync.pm
11  ##  ##
 ##    Revision 1.11  2002/12/23 07:10:59  joko  
 ##    + using MD5 for checksum generation again - the 32-bit integer hash from DBI seems to be too lazy  
 ##  
 ##    Revision 1.10  2002/12/19 01:07:16  joko  
 ##    + fixed output done via $logger  
 ##  
 ##    Revision 1.9  2002/12/16 07:02:34  jonen  
 ##    + added comment  
 ##  
 ##    Revision 1.8  2002/12/15 02:03:09  joko  
 ##    + fixed logging-messages  
 ##    + additional metadata-checks  
 ##  
 ##    Revision 1.7  2002/12/13 21:49:34  joko  
 ##    + sub configure  
 ##    + sub checkOptions  
 ##  
 ##    Revision 1.6  2002/12/06 04:49:10  jonen  
 ##    + disabled output-puffer here  
 ##  
 ##    Revision 1.5  2002/12/05 08:06:05  joko  
 ##    + bugfix with determining empty fields (Null) with DBD::CSV  
 ##    + debugging  
 ##    + updated comments  
 ##  
 ##    Revision 1.4  2002/12/03 15:54:07  joko  
 ##    + {import}-flag is now {prepare}-flag  
 ##  
 ##    Revision 1.3  2002/12/01 22:26:59  joko  
 ##    + minor cosmetics for logging  
 ##  
 ##    Revision 1.2  2002/12/01 04:43:25  joko  
 ##    + mapping deatil entries may now be either an ARRAY or a HASH  
 ##    + erase flag is used now (for export-operations)  
 ##    + expressions to refer to values inside deep nested structures  
 ##    - removed old mappingV2-code  
 ##    + cosmetics  
 ##    + sub _erase_all  
 ##  
 ##    Revision 1.1  2002/11/29 04:45:50  joko  
 ##    + initial check in  
 ##  
 ##    Revision 1.1  2002/10/10 03:44:21  cvsjoko  
 ##    + new  
12  ##    ----------------------------------------------------------------------------------------  ##    ----------------------------------------------------------------------------------------
13    
14    
# Line 61  package Data::Transfer::Sync; Line 17  package Data::Transfer::Sync;
17  use strict;  use strict;
18  use warnings;  use warnings;
19    
20  use Data::Dumper;  use base qw( DesignPattern::Object );
21  #use Hash::Merge qw( merge );  use base qw( DesignPattern::Bridge );
22    
23    
24  use misc::HashExt;  # =====================================   main
 use libp qw( md5_base64 );  
 use libdb qw( quotesql hash2Sql );  
 use Data::Transform::Deep qw( hash2object refexpr2perlref );  
 use Data::Compare::Struct qw( getDifference isEmpty );  
25    
26  # get logger instance  # get logger instance
27  my $logger = Log::Dispatch::Config->instance;  my $logger = Log::Dispatch::Config->instance;
28    
 $| = 1;  
   
 sub new {  
   my $invocant = shift;  
   my $class = ref($invocant) || $invocant;  
   my $self = {};  
   $logger->debug( __PACKAGE__ . "->new(@_)" );  
   bless $self, $class;  
   $self->configure(@_);  
   return $self;  
 }  
   
   
 sub configure {  
   my $self = shift;  
   my @args = @_;  
   if (!isEmpty(\@args)) {  
     my %properties = @_;  
     # merge args to properties  
     map { $self->{$_} = $properties{$_}; } keys %properties;  
     $self->_init();  
   } else {  
     #print "no args!", "\n";  
   }  
   #print Dumper($self);  
 }  
   
29  sub _init {  sub _init {
30    my $self = shift;    my $self = shift;
31      $logger->debug( __PACKAGE__ . "->new" );
32    $self->{configured} = 1;    $self->load('Core');
33        $self->load('API');
34    # build new container if necessary    $self->configure(@_);
   $self->{container} = Data::Storage::Container->new() if !$self->{container};  
     
   # add storages to container (optional)  
   foreach (keys %{$self->{storages}}) {  
     $self->{container}->addStorage($_, $self->{storages}->{$_});  
   }  
     
   # tag storages with id-authority and checksum-provider information  
   # TODO: better store tag inside metadata to hold bits together!  
   map { $self->{container}->{storage}->{$_}->{isIdentAuthority} = 1 } @{$self->{id_authorities}};  
   map { $self->{container}->{storage}->{$_}->{isChecksumAuthority} = 1; } @{$self->{checksum_authorities}};  
   map { $self->{container}->{storage}->{$_}->{isWriteProtected} = 1; } @{$self->{write_protected}};  
     
 }  
   
   
 sub prepareOptions {  
   
   my $self = shift;  
   my $opts = shift;  
   
 #print Dumper($opts);  
   
   $opts->{mode} ||= '';  
   $opts->{erase} ||= 0;  
   #$opts->{import} ||= 0;  
     
   $logger->notice( __PACKAGE__ . "->prepareOptions( source_node $opts->{source_node} mode $opts->{mode} erase $opts->{erase} prepare $opts->{prepare} )");  
   
   if (!$opts->{mapping} || !$opts->{mapping_module}) {  
     $logger->warning( __PACKAGE__ . "->prepareOptions: No mapping supplied - please check key 'mappings' in BizWorks/Config.pm");  
   }  
   
   my $evstring = "use $opts->{mapping_module};";  
   eval($evstring);  
   if ($@) {  
     $logger->warning( __PACKAGE__ . "->prepareOptions: error while trying to access mapping - $@");  
     return;  
   }  
   
   # resolve mapping metadata (returned from sub)  
   my $mapObject = $opts->{mapping_module}->new();  
   #print Dumper($map);  
   my $source_node_name = $opts->{source_node};  
   # check if mapping for certain node is contained in mapping object  
   if (!$mapObject->can($source_node_name)) {  
     $logger->warning( __PACKAGE__ . "->prepareOptions: Can't access mapping for node \"$source_node_name\" - please check $opts->{mapping_module}.");  
     return;  
   }  
   my $map = $mapObject->$source_node_name;  
   
   # remove asymmetries from $map (patch keys)  
   $map->{source_node} = $map->{source}; delete $map->{source};  
   $map->{target_node} = $map->{target}; delete $map->{target};  
   $map->{mapping} = $map->{details}; delete $map->{details};  
   $map->{direction} = $map->{mode}; delete $map->{mode};  
   
   # defaults (mostly for backward-compatibility)  
   $map->{source_node} ||= $source_node_name;  
   $map->{source_ident} ||= 'storage_method:id';  
   $map->{target_ident} ||= 'property:oid';  
   $map->{direction} ||= $opts->{mode};         # | PUSH | PULL | FULL  
   $map->{method} ||= 'checksum';                # | timestamp  
   $map->{source_exclude} ||= [qw( cs )];  
   
   # merge map to opts  
   map { $opts->{$_} = $map->{$_}; } keys %$map;  
       
 #print Dumper($opts);  
   
   # TODO: move this to checkOptions...  
     
   # check - do we have a target?  
   if (!$opts->{target_node}) {  
     $logger->warning( __PACKAGE__ . "->prepareOptions: No target given - please check metadata declaration.");  
     return;  
   }  
   
   
   #return $opts;  
   return 1;  
   
 }  
   
   
 sub checkOptions {  
   my $self = shift;  
   my $opts = shift;  
     
   my $result = 1;  
     
   # check - do we have a target node?  
   if (!$opts->{target_node}) {  
     $logger->warning( __PACKAGE__ . "->checkOptions: Error while resolving resource metadata - no 'target node' could be determined.");  
     $result = 0;  
   }  
   
   # check - do we have a mapping?  
   if (!$opts->{mapping} && !$opts->{mapping_module}) {  
     $logger->warning( __PACKAGE__ . "->checkOptions: Error while resolving resource metadata - no 'mapping' could be determined.");  
     $result = 0;  
   }  
     
   return $result;  
     
 }  
   
   
 # TODO: some feature to show off the progress of synchronization (cur/max * 100)  
 sub syncNodes {  
   
   my $self = shift;  
   my $args = shift;  
   
   if (!$self->{configured}) {  
     $logger->critical( __PACKAGE__ . "->syncNodes: Synchronization object is not configured/initialized correctly." );  
     return;  
   }  
   
   # remember arguments through the whole processing  
   $self->{args} = $args;  
   
   $logger->debug( __PACKAGE__ . "->syncNodes: starting" );  
   
   # hash to hold and/or fill in metadata required for the processing  
   $self->{meta} = {};  
     
   # hash to sum up results  
   my $direction_arrow = '';  
   
   # detect synchronization method to determine which optical symbol (directed arrow) to use  
   if (lc $self->{args}->{direction} eq 'push') {  
     $direction_arrow = '->';  
   } elsif (lc $self->{args}->{direction} eq 'pull') {  
     $direction_arrow = '<-';  
   } elsif (lc $self->{args}->{direction} eq 'full') {  
     $direction_arrow = '<->';  
   } else {  
   }  
   
   # decompose identifiers for each partner  
   # TODO: refactor!!! take this list from already established/given metadata  
   foreach ('source', 'target') {  
       
     # get/set metadata for further processing  
   
     # Partner and Node (e.g.: "L:Country" or "R:countries.csv")  
     if (my $item = $self->{args}->{$_}) {  
       my @item = split(':', $item);  
       $self->{meta}->{$_}->{dbkey} = $item[0];  
       $self->{meta}->{$_}->{node} = $item[1];  
     }  
       
     # Filter  
     if (my $item_filter = $self->{args}->{$_ . '_filter'}) {  
       $self->{meta}->{$_}->{filter} = $item_filter;  
     }  
   
     # IdentProvider  
     if (my $item_ident = $self->{args}->{$_ . '_ident'}) {  
       my @item_ident = split(':', $item_ident);  
       $self->{meta}->{$_}->{IdentProvider} = { method => $item_ident[0], arg => $item_ident[1] };  
     }  
   
     # TODO: ChecksumProvider  
   
     # exclude properties/subnodes  
     if (my $item_exclude = $self->{args}->{$_ . '_exclude'}) {  
       $self->{meta}->{$_}->{subnodes_exclude} = $item_exclude;  
     }  
       
     # TypeProvider  
     if (my $item_type = $self->{args}->{$_ . '_type'}) {  
       my @item_type = split(':', $item_type);  
       $self->{meta}->{$_}->{TypeProvider} = { method => $item_type[0], arg => $item_type[1] };  
     }  
       
     # Callbacks - writers (will be triggered _before_ writing to target)  
     if (my $item_writers = $self->{args}->{$_ . '_callbacks_write'}) {  
       my $descent = $_;     # this is important since the following code inside the map wants to use its own context variables  
       map { $self->{meta}->{$descent}->{Callback}->{write}->{$_}++; } @$item_writers;  
     }  
       
     # Callbacks - readers (will be triggered _after_ reading from source)  
     if (my $item_readers = $self->{args}->{$_ . '_callbacks_read'}) {  
       my $descent = $_;  
       map { $self->{meta}->{$descent}->{Callback}->{read}->{$_}++; } @$item_readers;  
     }  
       
     # resolve storage objects  
     #$self->{$_} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};  
     # relink references to metainfo  
     $self->{meta}->{$_}->{storage} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};  
     #print "iiiiisprov: ", Dumper($self->{meta}->{$_}->{storage}), "\n";  
   }  
   
 #print Dumper($self->{meta});  
   
   $logger->info( __PACKAGE__ . "->syncNodes: source=$self->{meta}->{source}->{dbkey}/$self->{meta}->{source}->{node} $direction_arrow target=$self->{meta}->{target}->{dbkey}/$self->{meta}->{target}->{node}" );  
   
   # build mapping  
   # incoming: and Array of node map entries (Array or Hash) - e.g.  
   #   [ 'source:item_name' => 'target:class_val' ]  
   #   { source => 'event->startDateTime', target => 'begindate' }  
   foreach (@{$self->{args}->{mapping}}) {  
     if (ref $_ eq 'ARRAY') {  
       my @entry1 = split(':', $_->[0]);  
       my @entry2 = split(':', $_->[1]);  
       my $descent = [];  
       my $node = [];  
       $descent->[0] = $entry1[0];  
       $descent->[1] = $entry2[0];  
       $node->[0] = $entry1[1];  
       $node->[1] = $entry2[1];  
       push @{$self->{meta}->{$descent->[0]}->{childnodes}}, $node->[0];  
       push @{$self->{meta}->{$descent->[1]}->{childnodes}}, $node->[1];  
     } elsif (ref $_ eq 'HASH') {  
       foreach my $entry_key (keys %$_) {  
         my $entry_val = $_->{$entry_key};  
         push @{$self->{meta}->{$entry_key}->{childnodes}}, $entry_val;  
       }  
     }  
   
   }  
   
 #print Dumper($self->{meta});  
     
   # check partners/nodes: does partner exist / is node available?  
   foreach my $partner (keys %{$self->{meta}}) {  
       
     # 1. check partners & storages  
     if (!$self->{meta}->{$partner}) {  
       $logger->critical( __PACKAGE__ . "->syncNodes: Could not find partner '$partner' in configuration metadata." );  
       return;  
     }  
   
     my $dbkey = $self->{meta}->{$partner}->{dbkey};  
   
     if (!$self->{meta}->{$partner}->{storage}) {  
       $logger->critical( __PACKAGE__ . "->syncNodes: Could not access storage of partner '$partner' (named '$dbkey'), looks like a configuration-error." );  
       return;  
     }  
       
     # TODO:  
     # 2. check if partners (and nodes?) are actually available....  
     # eventually pre-check mode of access-attempt (read/write) here to provide an "early-croak" if possible  
       
     # 3. check nodes  
     next if $self->{meta}->{$partner}->{storage}->{locator}->{type} eq 'DBI';    # HACK for DBD::CSV - re-enable for others  
     # get node-name  
     my $node = $self->{meta}->{$partner}->{node};  
     if (!$self->{meta}->{$partner}->{storage}->existsChildNode($node)) {  
       $logger->critical( __PACKAGE__ . "->syncNodes: Could not reach node \"$node\" at partner \"$partner\"." );  
       return;  
     }  
       
   }  
   
   # TODO:  
   #   + if action == PUSH: start processing  
   #  -+  if action == PULL: swap metadata and start processing  
   #   -  if action == FULL: start processing, then swap metadata and (re-)start processing  
   
 #print Dumper($self->{args});  
   
   # manipulate metainfo according to direction of synchronization  
   if (lc $self->{args}->{direction} eq 'push') {  
     # just do it ...  
   } elsif (lc $self->{args}->{direction} eq 'pull') {  
     #print "=======SWAP", "\n";  
     # swap  
     ($self->{meta}->{source}, $self->{meta}->{target}) =  
         ($self->{meta}->{target}, $self->{meta}->{source});  
   } elsif (lc $self->{args}->{direction} eq 'full') {  
   } else {  
   }  
   
   # import flag means: prepare the source node to be syncable  
   # this is useful if there are e.g. no "ident" or "checksum" columns yet inside a DBI like (row-based) storage  
   if ($self->{args}->{prepare}) {  
     $self->_prepareNode_MetaProperties('source');  
     $self->_prepareNode_DummyIdent('source');  
     #return;  
     #$self->_erase_all($opts->{source_node});  
   }  
     
   # erase flag means: erase the target  
   #if ($opts->{erase}) {  
   if ($self->{args}->{erase}) {  
     # TODO: move this method to the scope of the synchronization core and wrap it around different handlers  
     #print "ERASE", "\n";  
     $self->_erase_all('target');  
   }  
   
   $self->_syncNodes();  
   
 }  
   
   
 # TODO: abstract the hardwired use of "source" and "target" in here somehow - hmmmm.......  /(="§/%???  
 sub _syncNodes {  
   
   my $self = shift;  
     
   my $tc = OneLineDumpHash->new( {} );  
   my $results;  
     
   # set of objects is already in $self->{args}  
   # TODO: make independent of the terminology "object..."  
   $results = $self->{args}->{objectSet} if $self->{args}->{objectSet};  
   
   # apply filter  
   if (my $filter = $self->{meta}->{source}->{filter}) {  
     #print Dumper($filter);  
     #exit;  
     $results ||= $self->_getNodeList('source', $filter);  
   }  
     
   # get reference to node list from convenient method provided by CORE-HANDLE  
   #$results ||= $self->{source}->getListUnfiltered($self->{meta}->{source}->{node});  
   #$results ||= $self->{meta}->{source}->{storage}->getListUnfiltered($self->{meta}->{source}->{node});  
   $results ||= $self->_getNodeList('source');  
   
   # checkpoint: do we actually have a list to iterate through?  
   if (!$results || !@{$results}) {  
     $logger->notice( __PACKAGE__ . "->syncNodes: No nodes to synchronize." );  
     return;  
   }  
     
   # dereference  
   my @results = @{$results};  
   
   # iterate through set  
   foreach my $source_node_real (@results) {  
   
     $tc->{total}++;  
   
 #print "========================  iter", "\n";  
   
     # clone object (in case we have to modify it here)  
     # TODO:  
     #   - is a "deep_copy" needed here if occouring modifications take place?  
     #   - puuhhhh, i guess a deep_copy would destroy tangram mechanisms?  
     #   - after all, just take care for now that this object doesn't get updated!  
     my $source_node = $source_node_real;  
   
     # modify entry - handle new style callbacks (the readers)  
 #print Dumper($source_node);  
 #exit;  
   
     my $descent = 'source';  
   
     # handle callbacks right now while scanning them (asymmetric to the writers)  
     my $map_callbacks = {};  
     if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {  
   
       my $error = 0;  
   
       foreach my $node (keys %{$callbacks->{read}}) {  
           
         my $object = $source_node;  
         my $value; # = $source_node->{$node};  
   
         # ------------  half-redundant: make $self->callCallback($object, $value, $opts)  
         my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_read';  
         my $evalstring = 'return ' . $perl_callback . '( { object => $object, property => $node, value => $value, storage => $self->{meta}->{$descent}->{storage} } );';  
         #print $evalstring, "\n"; exit;  
         my $cb_result = eval($evalstring);  
         if ($@) {  
           die $@;  
           $error = 1;  
           print $@, "\n";  
         }  
         # ------------  half-redundant: make $self->callCallback($object, $value, $opts)  
           
         $source_node->{$node} = $cb_result;  
   
       }  
   
     }  
   
 #print Dumper($source_node);  
   
     # exclude defined fields  (simply delete from object)  
     map { delete $source_node->{$_} } @{$self->{meta}->{source}->{subnodes_exclude}};  
       
     # here we accumulate information about the status of the current node (payload/row/object/item/entry)  
     $self->{node} = {};  
     $self->{node}->{source}->{payload} = $source_node;  
   
 #print "res - ident", "\n";  
   
     # determine ident of entry  
     my $identOK = $self->_resolveNodeIdent('source');  
     #if (!$identOK && lc $self->{args}->{direction} ne 'import') {  
     if (!$identOK) {  
       #print Dumper($self->{meta}->{source});  
       $logger->critical( __PACKAGE__ . "->syncNodes: No ident found in source node \"$self->{meta}->{source}->{node}\", try to \"prepare\" this node first?" );  
       return;  
     }  
   
 #print "statload", "\n";  
 #print "ident: ", $self->{node}->{source}->{ident}, "\n";  
 #print Dumper($self->{node});  
       
     my $statOK = $self->_statloadNode('target', $self->{node}->{source}->{ident});  
   
 #print Dumper($self->{node});  
       
     # mark node as new either if there's no ident or if stat/load failed  
     if (!$statOK) {  
       $self->{node}->{status}->{new} = 1;  
       print "n" if $self->{verbose};  
     }  
   
 #print "checksum", "\n";  
       
     # determine status of entry by synchronization method  
     if ( (lc $self->{args}->{method} eq 'checksum') ) {  
     #if ( $statOK && (lc $self->{args}->{method} eq 'checksum') ) {  
     #if ( !$self->{node}->{status}->{new} && (lc $self->{args}->{method} eq 'checksum') ) {  
         
       # TODO:  
       # is this really worth a "critical"???  
       # no - it should just be a debug appendix i believe  
   
 #print "readcs", "\n";  
         
       # calculate checksum of source node  
       #$self->_calcChecksum('source');  
       if (!$self->_readChecksum('source')) {  
         $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"source\" entry with ident=\"$self->{node}->{source}->{ident}\"" );  
         $tc->{skip}++;  
         print "s" if $self->{verbose};  
         next;  
       }  
         
       # get checksum from synchronization target  
       $self->_readChecksum('target');  
       #if (!$self->_readChecksum('target')) {  
       #  $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"target\" entry with ident=\"$self->{node}->{source}->{ident}\"" );  
       #  next;  
       #}  
     
       # pre flight check: do we actually have a checksum provided?  
       #if (!$self->{node}->{source}->{checksum}) {  
       #  print "Source checksum for entry with ident \"$self->{node}->{source}->{ident}\" could not be calculated, maybe it's missing?.", "\n";  
       #  return;  
       #}  
         
       # determine if entry is "new" or "dirty"  
       # after all, this seems to be the point where the hammer falls.....  
       print "c" if $self->{verbose};  
       $self->{node}->{status}->{new} = !$self->{node}->{target}->{checksum};  
       if (!$self->{node}->{status}->{new}) {  
         $self->{node}->{status}->{dirty} =  
           $self->{node}->{status}->{new} ||  
           (!$self->{node}->{source}->{checksum} || !$self->{node}->{target}->{checksum}) ||  
           ($self->{node}->{source}->{checksum} ne $self->{node}->{target}->{checksum}) ||  
           $self->{args}->{force};  
       }  
   
     }  
   
     # first reaction on entry-status: continue with next entry if the current is already "in sync"  
     if (!$self->{node}->{status}->{new} && !$self->{node}->{status}->{dirty}) {  
       $tc->{in_sync}++;  
       next;  
     }  
   
     # build map to actually transfer the data from source to target  
     $self->_buildMap();  
   
   
 #print Dumper($self->{node}); exit;  
   
 #print "attempt", "\n";  
   
     # additional (new) checks for feature "write-protection"  
     if ($self->{meta}->{target}->{storage}->{isWriteProtected}) {  
       $tc->{attempt_transfer}++;  
       print "\n" if $self->{verbose};  
       $logger->notice( __PACKAGE__ . "->syncNodes: Target is write-protected. Will not insert or modify node. " .  
           "(Ident: $self->{node}->{source}->{ident} " . "Dump:\n" . Dumper($self->{node}->{source}->{payload}) . ")" );  
       print "\n" if $self->{verbose};  
       $tc->{skip}++;  
       next;  
     }  
   
     # transfer contents of map to target  
     if ($self->{node}->{status}->{new}) {  
       $tc->{attempt_new}++;  
       $self->_doTransferToTarget('insert');  
       # asymmetry: refetch node from target to re-calculate new ident and checksum (TODO: is IdentAuthority of relevance here?)  
       #print Dumper($self->{node});  
       $self->_statloadNode('target', $self->{node}->{target}->{ident}, 1);  
       $self->_readChecksum('target');  
   
     } elsif ($self->{node}->{status}->{dirty}) {  
       $tc->{attempt_modify}++;  
       # asymmetry: get ident before updating (TODO: is IdentAuthority of relevance here?)  
       $self->{node}->{target}->{ident} = $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}};  
       $self->_doTransferToTarget('update');  
       $self->_readChecksum('target');  
     }  
   
     if ($self->{node}->{status}->{ok}) {  
       $tc->{ok}++;  
       print "t" if $self->{verbose};  
     }  
       
     if ($self->{node}->{status}->{error}) {  
       $tc->{error}++;  
       push( @{$tc->{error_per_row}}, $self->{node}->{status}->{error} );  
       print "e" if $self->{verbose};  
     }  
       
     # change ident in source (take from target), if transfer was ok and target is an IdentAuthority  
     # this is (for now) called a "retransmit" indicated by a "r"-character when verbosing  
     if ($self->{node}->{status}->{ok} && $self->{meta}->{target}->{storage}->{isIdentAuthority}) {  
       print "r" if $self->{verbose};  
       #print Dumper($self->{meta});  
       #print Dumper($self->{node});  
       #exit;  
       $self->_doModifySource_IdentChecksum($self->{node}->{target}->{ident});  
     }  
   
     print ":" if $self->{verbose};  
   
   }  
   
   print "\n" if $self->{verbose};  
     
   # build user-message from some stats  
     my $msg = "statistics: $tc";  
       
     if ($tc->{error_per_row}) {  
       $msg .= "\n";  
       $msg .= "errors from \"error_per_row\":" . "\n";  
       $msg .= Dumper($tc->{error_per_row});  
     }  
       
     # todo!!!  
     #sysevent( { usermsg => $msg, level => $level }, $taskEvent );  
     $logger->info( __PACKAGE__ . "->syncNodes: $msg" );  
   
   return $tc;  
   
 }  
   
   
 # refactor this as some core-function to do a generic dump resolving data-encapsulations of e.g. Set::Object  
 sub _dumpCompact {  
   my $self = shift;  
   
   #my $vars = \@_;  
   my @data = ();  
   
   my $count = 0;  
   foreach (@_) {  
     my $item = {};  
     foreach my $key (keys %$_) {  
       my $val = $_->{$key};  
   
 #print Dumper($val);  
   
       if (ref $val eq 'Set::Object') {  
         #print "========================= SET", "\n";  
 #print Dumper($val);  
         #print Dumper($val->members());  
         #$val = $val->members();  
         #$vars->[$count]->{$key} = $val->members() if $val->can("members");  
         #$item->{$key} = $val->members() if $val->can("members");  
         $item->{$key} = $val->members();  
         #print Dumper($vars->[$count]->{$key});  
   
       } else {  
         $item->{$key} = $val;  
       }  
   
     }  
     push @data, $item;  
     $count++;  
   }  
   
 #print "Dump:", Dumper(@data), "\n";  
   
   $Data::Dumper::Indent = 0;  
   my $result = Dumper(@data);  
   $Data::Dumper::Indent = 2;  
   return $result;  
     
 }  
   
   
 sub _calcChecksum {  
   
   my $self = shift;  
   my $descent = shift;  
   my $specifier = shift;  
   
   # calculate checksum for current object  
     my $ident = $self->{node}->{$descent}->{ident};  
     
   # build dump of this node  
     my $payload = $self->{node}->{$descent}->{payload};  
     #my $dump = $ident . "\n" . $item->quickdump();  
     #my $dump = $ident . "\n" . Dumper($item);  
     my $dump = $ident . "\n" . $self->_dumpCompact($payload);  
     
   # TODO: $logger->dump( ... );  
     #$logger->debug( __PACKAGE__ . ": " . $dump );  
     #$logger->dump( __PACKAGE__ . ": " . $dump );  
     
   # calculate checksum from dump  
     # note: the 32-bit integer hash from DBI seems  
     # to generate duplicates with small payloads already in ranges of hundreds of items/rows!!!  
     # try to avoid to use it or try to use it only for payloads greater than, hmmm, let's say 30 chars?  
     # (we had about 15 chars average per item (row))  
   
     # md5-based fingerprint, base64 encoded (from Digest::MD5)  
       $self->{node}->{$descent}->{checksum} = md5_base64($dump) . '==';  
     # 32-bit integer "hash" value (maybe faster?) (from DBI)  
       #$self->{node}->{$descent}->{checksum} = DBI::hash($dump, 1);  
   
   # signal good  
   return 1;  
   
 }  
   
   
 sub _readChecksum {  
   my $self = shift;  
   
   my $descent = shift;  
   
   #print "getcheck:", "\n"; print Dumper($self->{node}->{$descent});  
     
   if (!$self->{node}->{$descent}) {  
     # signal checksum bad  
     return;  
   }  
   
   # get checksum for current entry  
   # TODO: don't have the checksum column/property hardcoded as "cs" here, make this configurable somehow  
   
   if ($self->{meta}->{$descent}->{storage}->{isChecksumAuthority}) {  
     #$self->{node}->{$descent}->{checksum} = $entry->{cs};  
     #$self->{node}->{$descent}->{checksum} = $self->_calcChecksum($descent); # $entry->{cs};  
     #print "descent: $descent", "\n";  
     $self->_calcChecksum($descent);  
     #print "checksum: ", $self->{node}->{$descent}->{checksum}, "\n";  
   } else {  
   
     #$self->{node}->{$descent}->{checksum} = $entry->{cs};  
     $self->{node}->{$descent}->{checksum} = $self->{node}->{$descent}->{payload}->{cs};  
   }  
   
   # signal checksum good  
   return 1;  
   
 }  
   
   
 sub _buildMap {  
   
  my $self = shift;  
   
   # field-structure for building sql  
   # mapping of sql-fieldnames to object-attributes  
     $self->{node}->{map} = {};  
   
     # manually set ...  
       # ... object-id  
       $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}} = $self->{node}->{source}->{ident};  
       # ... checksum  
       $self->{node}->{map}->{cs} = $self->{node}->{source}->{checksum};  
   
 #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";  
   
     # for transferring flat structures via simple (1:1) mapping  
     # TODO: diff per property / property value  
   
     if ($self->{args}->{mapping}) {  
       # apply mapping from $self->{args}->{mapping} to $self->{node}->{map}  
       #foreach my $key (@{$self->{meta}->{source}->{childnodes}}) {  
       my @childnodes = @{$self->{meta}->{source}->{childnodes}};  
       for (my $mapidx = 0; $mapidx <= $#childnodes; $mapidx++) {  
         #my $map_right = $self->{args}->{mapping}->{$key};  
           
         $self->{node}->{source}->{propcache} = {};  
         $self->{node}->{target}->{propcache} = {};  
           
         # get property name  
         $self->{node}->{source}->{propcache}->{property} = $self->{meta}->{source}->{childnodes}->[$mapidx];  
         $self->{node}->{target}->{propcache}->{property} = $self->{meta}->{target}->{childnodes}->[$mapidx];  
         #print "map: $map_right", "\n";  
   
         # get property value  
         my $value;  
           
         # detect for callback - old style - (maybe the better???)  
         if (ref($self->{node}->{target}->{map}) eq 'CODE') {  
           #$value = &$map_right($objClone);  
         } else {  
           # plain (scalar?) value  
           #$value = $objClone->{$map_right};  
           $self->{node}->{source}->{propcache}->{value} = $self->{node}->{source}->{payload}->{$self->{node}->{source}->{propcache}->{property}};  
         }  
         #$self->{node}->{map}->{$key} = $value;  
           
         # detect expression  
         # for transferring deeply nested structures described by expressions  
         #print "val: $self->{node}->{source}->{propcache}->{value}", "\n";  
         if ($self->{node}->{source}->{propcache}->{property} =~ s/^expr://) {  
             
           # create an anonymous sub to act as callback target dispatcher  
             my $cb_dispatcher = sub {  
               #print "===============  CALLBACK DISPATCHER", "\n";  
               #print "ident: ", $self->{node}->{source}->{ident}, "\n";  
               #return $self->{node}->{source}->{ident};  
                 
             };  
             
   
 #print Dumper($self->{node});  
             
           # build callback map for helper function  
           #my $cbmap = { $self->{meta}->{source}->{IdentProvider}->{arg} => $cb_dispatcher };  
           my $cbmap = {};  
           my $value = refexpr2perlref($self->{node}->{source}->{payload}, $self->{node}->{source}->{propcache}->{property}, $cbmap);  
           $self->{node}->{source}->{propcache}->{value} = $value;  
         }  
   
         # encode values dependent on type of underlying storage here - expand cases...  
         my $storage_type = $self->{meta}->{target}->{storage}->{locator}->{type};  
         if ($storage_type eq 'DBI') {  
           # ...for sql  
           $self->{node}->{source}->{propcache}->{value} = quotesql($self->{node}->{source}->{propcache}->{value});  
         }  
          elsif ($storage_type eq 'Tangram') {  
           # iso? utf8 already possible?  
           
         } elsif ($storage_type eq 'LDAP') {  
           # TODO: encode utf8 here?  
         }  
   
         # store value to transfer map  
         $self->{node}->{map}->{$self->{node}->{target}->{propcache}->{property}} = $self->{node}->{source}->{propcache}->{value};  
   
       }  
     }  
   
       
   # TODO: $logger->dump( ... );  
   #$logger->debug( "sqlmap:" . "\n" . Dumper($self->{node}->{map}) );  
 #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";  
 #print "entrystatus: ", Dumper($self->{node}), "\n";  
   
 }  
   
# Resolve the identifier ("ident") of the node currently loaded for the
# given descent ('source' or 'target') and cache it in
# $self->{node}->{$descent}->{ident}.
#
# How the ident is obtained is driven by the per-descent "IdentProvider"
# metadata:
#   method => 'property'        - read it directly from the payload hash,
#                                 using {arg} as the property name
#   method => 'storage_method'  - call the method named by {arg} on the
#                                 storage object, passing the payload
#
# Returns 1 when a true ident could be resolved, nothing otherwise.
# NOTE(review): an ident of "0" or "" counts as a failure here - confirm
# that this is intended.
sub _resolveNodeIdent {
  my ($self, $descent) = @_;

  # payload of the node we are working on
  my $payload = $self->{node}->{$descent}->{payload};

  # how to get at the ident - taken from global metadata for this descent
  my $provider_method = $self->{meta}->{$descent}->{IdentProvider}->{method};
  my $provider_arg    = $self->{meta}->{$descent}->{IdentProvider}->{arg};

  my $ident;
  if ($provider_method eq 'property') {
    # the ident is a plain property of the payload itself
    $ident = $payload->{$provider_arg};
  } elsif ($provider_method eq 'storage_method') {
    # delegate to the storage layer (e.g. Tangram's "id" method)
    $ident = $self->{meta}->{$descent}->{storage}->$provider_arg($payload);
  }

  # cache the result on the node container
  $self->{node}->{$descent}->{ident} = $ident;

  return 1 if $ident;

}
   
   
# Write a value map to the node of the given descent ('source' or 'target').
#
# Parameters:
#   $descent - which side to modify ('source' or 'target')
#   $action  - 'insert' or 'update' (compared case-insensitively)
#   $map     - hashref of property => value pairs to write
#   $crit    - optional criteria string (SQL WHERE fragment) for updates;
#              defaults to matching on the IdentProvider column/ident
#
# Dispatches on the storage locator type:
#   DBI     - builds an INSERT/UPDATE statement via hash2Sql and sends it
#   Tangram - creates (insert) or loads (update) the persistent object and
#             fills it from the map via hash2object
#
# Side effects: sets $self->{node}->{status}->{ok} or ...->{status}->{error};
# a Tangram insert additionally stores the fresh oid in
# $self->{node}->{$descent}->{ident}.
sub _modifyNode {
  my $self = shift;
  my $descent = shift;
  my $action = shift;
  my $map = shift;
  my $crit = shift;

  # map for new style callbacks
  my $map_callbacks = {};

  # checks go first!
    
    # TODO: this should be reviewed first - before extending  ;-)
    # TODO: this should be extended:
    # count this cases inside the caller to this sub and provide a better overall message
    # if this counts still zero in the end:
    #     "No nodes have been touched for modify: Do you have column-headers in your csv file?"
    # NOTE(review): this guard is currently a no-op - both the log call and
    # the early return are commented out, so an empty node falls through.
    if (not defined $self->{node}) {
      #$logger->critical( __PACKAGE__ . "->_modifyNode failed: \"$descent\" node is empty." );
      #return;
    }

  # transfer callback nodes from value map to callback map - handle them afterwards! - (new style callbacks)
  # (keys listed under the descent's Callback->{write} metadata are removed
  # from $map and processed after the main write, see below)
  if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {
    foreach my $callback (keys %{$callbacks->{write}}) {
      $map_callbacks->{write}->{$callback} = $map->{$callback};
      delete $map->{$callback};
    }
  }
  
  
  #print Dumper($self->{meta});

  # DBI speaks SQL
  if ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'DBI') {

#print Dumper($self->{node});
    my $sql_main;
    # translate map to sql
    #print $action, "\n"; exit;
    #print $self->{meta}->{$descent}->{node}, "\n"; exit;
    #print "action:";
    #print $action, "\n";
#$action = "anc";
#print "yai", "\n";

#print Dumper($map);
#delete $map->{cs};

    if (lc($action) eq 'insert') {
      $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_INSERT');
    } elsif (lc $action eq 'update') {
      # default criteria: match the row on its ident column
      $crit ||= "$self->{meta}->{$descent}->{IdentProvider}->{arg}='$self->{node}->{$descent}->{ident}'";
      $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_UPDATE', $crit);
    }

#$sql_main = "UPDATE currencies_csv SET oid='abcdef' WHERE text='Australian Dollar' AND key='AUD';";
#$sql_main = "UPDATE currencies_csv SET oid='huhu2' WHERE ekey='AUD'";

#print "sql: ", $sql_main, "\n";
#exit;

    # transfer data
    my $sqlHandle = $self->{meta}->{$descent}->{storage}->sendCommand($sql_main);

#exit;

    # handle errors: record the failed statement plus DBI error details on
    # the node's status container for the caller to inspect
    if ($sqlHandle->err) {
      #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }
      $self->{node}->{status}->{error} = {
        statement => $sql_main,
        state => $sqlHandle->state,
        err => $sqlHandle->err,
        errstr => $sqlHandle->errstr,
      };
    } else {
      $self->{node}->{status}->{ok} = 1;
    }

  # Tangram does it the oo-way (naturally)
  } elsif ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'Tangram') {
    my $sql_main;
    my $object;

    # determine classname
    my $classname = $self->{meta}->{$descent}->{node};
    
    # properties to exclude
    my @exclude = @{$self->{meta}->{$descent}->{subnodes_exclude}};

    # the ident column is managed by the engine, never written as a property
    if (my $identProvider = $self->{meta}->{$descent}->{IdentProvider}) {
      push @exclude, $identProvider->{arg};
    }

    # new feature:
    #     - check TypeProvider metadata property from other side
    #     - use argument (arg) inside as a classname for object creation on this side
    #my $otherSide = $self->_otherSide($descent);
    if (my $typeProvider = $self->{meta}->{$descent}->{TypeProvider}) {
      #print Dumper($map);
      $classname = $map->{$typeProvider->{arg}};
      # remove nodes from map also (push nodes to "subnodes_exclude" list)
      push @exclude, $typeProvider->{arg};
    }
    
    # exclude banned properties (remove from map)
    #map { delete $self->{node}->{map}->{$_} } @{$self->{args}->{exclude}};
    map { delete $map->{$_} } @exclude;

    # list of properties
    my @props = keys %{$map};
    
    # transfer data
    if (lc $action eq 'insert') {

      # build array to initialize object
      #my @initarray = ();
      #map { push @initarray, $_, undef; } @props;

      # make the object persistent in four steps:
      #   - raw create (perl / class tangram scope)
      #   - engine insert (tangram scope)   ... this establishes inheritance - don't try to fill in inherited properties before!
      #   - raw fill-in from hash (perl scope)
      #   - engine update (tangram scope)  ... this updates all properties just filled in
      
      # create new object ...
      #my $object = $classname->new( @initarray );
      $object = $classname->new();
      
      # ... pass to orm ...
      $self->{meta}->{$descent}->{storage}->insert($object);

      # ... and initialize with empty (undef'd) properties.
      #print Dumper(@props);
      map { $object->{$_} = undef; } @props;

      # mix in values ...
      hash2object($object, $map);

      # ... and re-update@orm.
#print Dumper($object);
      $self->{meta}->{$descent}->{storage}->update($object);

      # asymmetry: get ident after insert
      # TODO:
      #   - just do this if it is an IdentAuthority
      #   - use IdentProvider metadata here
#print Dumper($self->{meta}->{$descent});
      my $oid = $self->{meta}->{$descent}->{storage}->id($object);
#print "oid: $oid", "\n";
      $self->{node}->{$descent}->{ident} = $oid;


    } elsif (lc $action eq 'update') {
      
      # get fresh object from orm first
      $object = $self->{meta}->{$descent}->{storage}->load($self->{node}->{$descent}->{ident});

#print Dumper($self->{node});
      
      # mix in values
      #print Dumper($object);
      hash2object($object, $map);
      #print Dumper($object);
      #exit;
      $self->{meta}->{$descent}->{storage}->update($object);
    }

    my $error = 0;

    # handle new style callbacks - this is a HACK - do this without an eval!
    # NOTE(review): the callback name is assembled from metadata and run via
    # string eval - consider resolving a code ref ("can") and a block eval
    # instead; also note $@ is only checked immediately after each eval.
    #print Dumper($map);
    #print "cb: ", Dumper($self->{meta}->{$descent}->{Callback});
    #print Dumper($map_callbacks);
    foreach my $node (keys %{$map_callbacks->{write}}) {
      #print Dumper($node);
      my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_write';
      my $evalstring = $perl_callback . '( { object => $object, value => $map_callbacks->{write}->{$node}, storage => $self->{meta}->{$descent}->{storage} } );';
      #print $evalstring, "\n"; exit;
      eval($evalstring);
      if ($@) {
        $error = 1;
        print $@, "\n";
      }
      
      #print "after eval", "\n";
      
      if (!$error) {
        # re-update@orm
        $self->{meta}->{$descent}->{storage}->update($object);
      }
    }
  
    # handle errors: on callback failure the freshly written object is
    # erased again (poor man's rollback)
    if ($error) {
      #print "error", "\n";
=pod
      my $sqlHandle;
      #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }
      $self->{node}->{status}->{error} = {
        statement => $sql_main,
        state => $sqlHandle->state,
        err => $sqlHandle->err,
        errstr => $sqlHandle->errstr,
      };
=cut
      # rollback....
      #print "rollback", "\n";
      $self->{meta}->{$descent}->{storage}->erase($object);
      #print "after rollback", "\n";
    } else {
      $self->{node}->{status}->{ok} = 1;
    }


  }

}
   
# TODO:
# this should be split up into...
#   - a "_statNode" (should just touch the node to check for existance)
#   - a "_loadNode" (should load node completely)
#   - maybe additionally a "loadNodeProperty" (may specify properties to load)
#   - introduce $self->{nodecache} for this purpose
# TODO:
#   should we:
#     - not pass ident in here but resolve it via "$descent"?
#     - refactor this and stuff it with additional debug/error message
#       - this = the way the implicit load mechanism works
#
# Fetch the entry identified by $ident from the given descent's storage and
# load it into $self->{node}->{$descent} (keys "ident" and "payload").
#
# Parameters:
#   $descent - 'source' or 'target'
#   $ident   - identifier value to look the entry up by
#   $force   - reload even if the node container is already populated
#
# Returns 1 on success (also when the node was already loaded and $force is
# false); returns nothing on any failure (missing ident, query error, or
# entry not found).
sub _statloadNode {

  my $self = shift;
  my $descent = shift;
  my $ident = shift;
  my $force = shift;

  # fetch entry to retrieve checksum from
  # was:
  if (!$self->{node}->{$descent} || $force) {
  # is:
  #if (!$self->{node}->{$descent}->{item} || $force) {
      
    # without an ident there is nothing to look up
    if (!$ident) {
      #print "\n", "Attempt to fetch entry implicitely by ident failed: no ident given! This may result in an insert if no write-protection is in the way.", "\n";
      return;
    }
      
    # patch for DBD::CSV
    # NOTE(review): "Null" is treated as DBD::CSV's representation of an
    # empty field here - confirm this matches the driver in use.
    if ($ident && $ident eq 'Null') {
      return;
    }

#print "yai!", "\n";

    # query the storage for the entry matching the ident column, also
    # requesting the checksum subnode "cs"
    my $query = {
      node => $self->{meta}->{$descent}->{node},
      subnodes => [qw( cs )],
      criterias => [
        { key => $self->{meta}->{$descent}->{IdentProvider}->{arg},
           op => 'eq',
           val => $ident },
      ]
    };

#print Dumper($query);

    my $result = $self->{meta}->{$descent}->{storage}->sendQuery($query);

    my $entry = $result->getNextEntry();

#print Dumper($entry);
#print "pers: " . $self->{meta}->{$descent}->{storage}->is_persistent($entry), "\n";
#my $state = $self->{meta}->{$descent}->{storage}->_fetch_object_state($entry, { name => 'TransactionHop' } );
#print Dumper($state);

    my $status = $result->getStatus();

#print Dumper($status);
      
    # TODO: enhance error handling (store inside tc)
    #if (!$row) {
    #  print "\n", "row error", "\n";
    #  next;
    #}

    # these checks run before actually loading payload- and meta-data to node-container
      
      # 1st level - hard error (the query itself failed)
      if ($status && $status->{err}) {
        $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - hard error (that's ok): $status->{err}" );
        return;
      }
    
      # 2nd level - logical (empty/notfound) error
      if (($status && $status->{empty}) || !$entry) {
        $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - logical error (that's ok)" );
        #print "no entry (logical)", "\n";
        return;
      }

#print Dumper($entry);

    # was:
    # $self->{node}->{$descent}->{ident} = $ident;    
    # is:
    # TODO: re-resolve ident from entry via metadata "IdentProvider" here - like elsewhere
    $self->{node}->{$descent}->{ident} = $ident;
    $self->{node}->{$descent}->{payload} = $entry;

  }
    
  return 1;
    
}
   
# Shortcut: write the accumulated transfer map ($self->{node}->{map}) to the
# target node. $action is passed through to _modifyNode ('insert'/'update').
sub _doTransferToTarget {
  my ($self, $action) = @_;
  return $self->_modifyNode('target', $action, $self->{node}->{map});
}
   
# Re-stamp the source node after a transfer: write the (possibly new) ident
# and the checksum calculated on the target side back to the source node.
# This changes an old node to a new one including ident and checksum.
# TODO (from original author):
#   - eventually introduce an external resource to store this data to
#   - then we won't have to "re"-modify the source node here
sub _doModifySource_IdentChecksum {
  my ($self, $ident_new) = @_;

  # value map: the ident column (per IdentProvider metadata) plus the
  # hardcoded checksum column "cs", taken from the target node
  my %map = (
    $self->{meta}->{source}->{IdentProvider}->{arg} => $ident_new,
    cs => $self->{node}->{target}->{checksum},
  );

  $self->_modifyNode('source', 'update', \%map);
}
   
   
# Shortcut: fetch the (optionally filtered) list of entries for the node
# configured under the given descent, by delegating to the storage layer.
# ... let's try to avoid _any_ redundant code in here (ok... - at the cost of method lookups...)
sub _getNodeList {
  my ($self, $descent, $filter) = @_;
  my $storage = $self->{meta}->{$descent}->{storage};
  return $storage->getListFiltered($self->{meta}->{$descent}->{node}, $filter);
}
   
   
# Ensure the node (table) behind the given descent carries the meta
# properties required for syncing: the ident column (per IdentProvider
# metadata) and the checksum column "cs".
#
# Looks at the first entry of the node list and compares its keys against
# the required ones; when the check indicates they are missing, tries to
# add them via "ALTER TABLE ... ADD COLUMN ...".
#
# NOTE(review): the "isEmpty($diff)" condition looks inverted - $diff holds
# the required columns NOT found in the first entry, so an *empty* diff
# should mean nothing is missing. Verify the exact semantics of
# getDifference/isEmpty before changing anything here.
# NOTE(review): the ALTER loop adds *all* required columns (not just the
# missing ones) and specifies no column type - presumably relying on
# driver-specific defaults; confirm against the DBD in use.
sub _prepareNode_MetaProperties {
  my $self = shift;
  my $descent = shift;

  $logger->info( __PACKAGE__ . "->_prepareNode_MetaProperties( descent $descent )" );
  
  # TODO: this should (better) be: "my $firstnode = $self->_getFirstNode($descent);"
  my $list = $self->_getNodeList($descent);
  
  # get first node
  my $firstnode = $list->[0];
  
  # check if node contains meta properties/nodes
  # TODO: "cs" is hardcoded here!
  my @required = ( $self->{meta}->{$descent}->{IdentProvider}->{arg}, 'cs' );
  my @found = keys %$firstnode;
  #my @diff = getDifference(\@found, \@required);
  my $diff = getDifference(\@required, \@found);
  #print Dumper(@found);
  #print Dumper(@required);
  #print Dumper(@diff);
  #if (!$#diff || $#diff == -1) {
  if (isEmpty($diff)) {
    $logger->warning( __PACKAGE__ . "->_prepareNode_MetaProperties: node is lacking meta properties - will try to alter..." );
    foreach (@required) {
      my $sql = "ALTER TABLE $self->{meta}->{$descent}->{node} ADD COLUMN $_";
      #print "sql: $sql", "\n";
      my $res = $self->{meta}->{$descent}->{storage}->sendCommand($sql);
      #print Dumper($res->getStatus());
    }
  }

}
   
# Stamp every entry of the given descent's node with a synthetic ("dummy")
# ident and an empty checksum, preparing rows that carry no sync metadata
# yet.
#
# The dummy ident is built from a fixed base, a running counter and a fixed
# appendix, so each row receives a distinct value. Since the rows have no
# ident yet, each one is matched for the update via a criteria string built
# from all of its remaining (non-meta) columns.
sub _prepareNode_DummyIdent {
  my ($self, $descent) = @_;

  $logger->info( __PACKAGE__ . "->_prepareNode_DummyIdent( descent $descent )" );

  my $nodes = $self->_getNodeList($descent);
  #print Dumper($nodes);

  # constants used to synthesize the dummy ident
  my $base     = 5678983;
  my $appendix = '0001';

  my $touched = 0;
  foreach my $entry (@$nodes) {

    # synthesize a distinct dummy ident for this row
    my $dummy = ($touched + $base) . $appendix;

    # values to write: ident column (per metadata) plus an empty checksum
    my $map = {
      $self->{meta}->{$descent}->{IdentProvider}->{arg} => $dummy,
      cs => undef,
    };

    # the row's remaining columns (those not part of the value map) ...
    my $rest = getDifference([keys %$entry], [keys %$map]);
    next unless scalar @$rest;

    # ... become the criteria string identifying this row
    my @conditions;
    foreach my $column (@$rest) {
      next unless $column;
      my $content = $entry->{$column};
      next unless $content;
      push @conditions, "$column='" . quotesql($content) . "'";
    }
    my $criteria = join ' AND ', @conditions;

    # progress indicator
    print "p" if $self->{verbose};

    $self->_modifyNode($descent, 'update', $map, $criteria);
    $touched++;
  }

  print "\n" if $self->{verbose};

  $logger->warning( __PACKAGE__ . "->_prepareNode_DummyIdent: no nodes touched" )
    if !$touched;

}
   
# Map a descent name to its counterpart: 'source' <-> 'target'.
# Returns the empty string for anything else.
# TODO (from original author): handle this in an abstract way (wipe out use
# of 'source' and/or 'target' inside the core).
sub _otherSide {
  my ($self, $descent) = @_;
  my %opposite = ( source => 'target', target => 'source' );
  return $opposite{$descent} || '';
}
   
# Wipe all entries of the node configured for the given descent by
# delegating to the storage layer's eraseAll.
sub _erase_all {
  my ($self, $descent) = @_;
  my $nodename = $self->{meta}->{$descent}->{node};
  $self->{meta}->{$descent}->{storage}->eraseAll($nodename);
}
36    
   
 =pod  
   
   
 =head1 DESCRIPTION  
   
 Data::Transfer::Sync is a module providing a generic synchronization process  
across arbitrary/multiple storages based on an ident/checksum mechanism.  
 It sits on top of Data::Storage.  
   
   
 =head1 REQUIREMENTS  
   
   For full functionality:  
     Data::Storage  
     Data::Transform  
     Data::Compare  
     ... and all their dependencies  
   
   
 =head1 AUTHORS / COPYRIGHT  
   
 The Data::Storage module is Copyright (c) 2002 Andreas Motl.  
 All rights reserved.  
   
 You may distribute it under the terms of either the GNU General Public  
 License or the Artistic License, as specified in the Perl README file.  
   
   
 =head1 SUPPORT / WARRANTY  
   
 Data::Storage is free software. IT COMES WITHOUT WARRANTY OF ANY KIND.  
   
   
   
 =head1 BUGS  
   
   When in "import" mode for windows file - DBD::AutoCSV may hang.  
   Hint: Maybe the source node contains an ident-, but no checksum-column?  
   
   
 =head1 USER LEVEL ERRORS  
   
 =head4 Mapping  
   
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 )critical: BizWorks::Process::Setup->startSync: Can't access mapping for node "Currency" - please check BizWorks::ResourceMapping.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   You have to create a sub for each node used in synchronization inside named Perl module. The name of this sub _must_ match  
   the name of the node you want to sync. This sub holds mapping metadata to give the engine hints about how  
   to access the otherwise generic nodes.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 DBD::AutoCSV's rulebase  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 )  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
     
   Execution ERROR: Error while scanning: Missing first row or scanrule not applied. at C:/home/amo/develop/netfrag.org/nfo/perl/libs/DBD/CSV.p  
   m line 165, <GEN9> line 1.  
    called from C:/home/amo/develop/netfrag.org/nfo/perl/libs/Data/Storage/Handler/DBI.pm at 123.  
     
   DBI-Error: DBD::AutoCSV::st fetchrow_hashref failed: Attempt to fetch row from a Non-SELECT statement  
   notice: Data::Transfer::Sync->syncNodes: No nodes to synchronize.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   DBD::AutoCSV contains a rulebase which is spooled down while attempting to guess the style of the csv file regarding  
  parameters like newline (eol), column-separation character (sep_char), quoting character (quote_char).  
   If this spool runs out of entries and no style could be resolved, DBD::CSV dies causing this "Execution ERROR" which  
   results in a "DBI-Error" afterwards.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Check structure of source node  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
   critical: Data::Transfer::Sync->syncNodes: Can not synchronize: No ident found in source node, maybe try to "import" this node first.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  If low-level detection succeeds, but none of the other required information is found, this message is issued.  
  "Other information" might be:  
     - column-header-row completely missing  
     - ident column is empty  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Modify structure of source node  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
   info: Data::Transfer::Sync->_prepareNode_MetaProperties( descent source )  
   warning: Data::Transfer::Sync->_prepareNode_MetaProperties: node is lacking meta properties - will try to alter...  
   SQL ERROR: Command 'ALTER' not recognized or not supported!  
     
   SQL ERROR: Command 'ALTER' not recognized or not supported!  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  The engine found a node whose structure does not match the required one. It tries to alter this automatically - only when doing "import" -  
   but the DBD driver (in this case DBD::CSV) gets in the way croaking not to be able to do this.  
   This could also appear if your database connection has insufficient rights to modify the database structure.  
   DBD::CSV croaks because it doesn't implement the ALTER command, so please edit your columns manually.  
   Hint: Add columns with the names of your "ident" and "checksum" property specifications.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Load source node by ident  
   
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->_prepareNode_DummyIdent( descent source )  
   pcritical: Data::Transfer::Sync->_modifyNode failed: "source" node is empty.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   The source node could not be loaded. Maybe the ident is missing. Please check manually.  
   Hint: Like above, the ident and/or checksum columns may be missing....  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head1 TODO  
   
   - sub _resolveIdentProvider  
   - wrap _doModifySource and _doTransferTarget around a core function which can change virtually any type of node  
   - split this module up into Sync.pm, Sync/Core.pm, Sync/Compare.pm and Sync/Compare/Checksum.pm  
      - introduce _compareNodes as a core method and wrap it around methods in Sync/Compare/Checksum.pm  
   - introduce Sync/Compare/MyComparisonImplementation.pm  
   - some generic deferring method - e.g. "$self->defer(action)" - to be able to accumulate a bunch of actions for later processing  
      - this implies everything done is _really_ split up into generic actions - how else would we defer them???  
      - example uses:  
         - fetch whole checksum list from node  
         - remember source ident retransmits  
      - remember: this is convenient - and maybe / of course faster - but we'll loose "per-node-atomic" operations  
   - feature: mechanism to implicit inject checksum property to nodes (alter table / modify schema)  
   - expand statistics / keep track of:  
     - touched/untouched nodes  
   - full sync  
     - just do a push and a pull for now but use stats for touched nodes in between to speed up things  
   - introduce some new metadata flags for a synchronization partner which is (e.g.) of "source" or "target":  
     - isNewNodePropagator  
     - isWriteProtected  
   
   
 =cut  
   
37  1;  1;

Legend:
Removed from v.1.12  
changed lines
  Added in v.1.13

MailToCvsAdmin">MailToCvsAdmin
ViewVC Help
Powered by ViewVC 1.1.26 RSS 2.0 feed