
Diff of /nfo/perl/libs/Data/Transfer/Sync/Core.pm


revision 1.1 by joko, Sun Jan 19 01:23:04 2003 UTC
revision 1.12 by joko, Sat Jun 19 01:45:08 2004 UTC
# Line 1  Line 1 
1    ## -------------------------------------------------------------------------
2    ##
3  ##    $Id$  ##    $Id$
4  ##  ##
5  ##    Copyright (c) 2002  Andreas Motl <andreas.motl@ilo.de>  ##    Copyright (c) 2002  Andreas Motl <andreas.motl@ilo.de>
6  ##  ##
7  ##    See COPYRIGHT section in pod text below for usage and distribution rights.  ##    See COPYRIGHT section in pod text below for usage and distribution rights.
8  ##  ##
9  ##    ----------------------------------------------------------------------------------------  ## -------------------------------------------------------------------------
10  ##    $Log$  ##    $Log$
11    ##    Revision 1.12  2004/06/19 01:45:08  joko
12    ##    introduced "local checksum"-mechanism
13    ##    moved _dumpCompact to ::Compare::Checksum
14    ##
15    ##    Revision 1.11  2004/05/11 20:03:48  jonen
16    ##    bugfix[joko] related to Attribute Map
17    ##
18    ##    Revision 1.10  2003/06/25 23:03:57  joko
19    ##    no debugging
20    ##
21    ##    Revision 1.9  2003/05/13 08:17:52  joko
22    ##    buildAttributeMap now propagates error
23    ##
24    ##    Revision 1.8  2003/03/27 15:31:15  joko
25    ##    fixes to modules regarding new namespace(s) below Data::Mungle::*
26    ##
27    ##    Revision 1.7  2003/02/21 08:01:11  joko
28    ##    debugging, logging
29    ##    renamed module
30    ##
31    ##    Revision 1.6  2003/02/14 14:03:49  joko
32    ##    + logging, debugging
33    ##    - refactored code to sister module
34    ##
35    ##    Revision 1.5  2003/02/11 05:30:47  joko
36    ##    + minor fixes and some debugging mud
37    ##
38    ##    Revision 1.4  2003/02/09 05:01:10  joko
39    ##    + major structure changes
40    ##    - refactored code to sister modules
41    ##
42    ##    Revision 1.3  2003/01/20 17:01:14  joko
43    ##    + cosmetics and debugging
44    ##    + probably refactored code to new plugin-modules 'Metadata.pm' and/or 'StorageInterface.pm' (guess it was the last one...)
45    ##
46    ##    Revision 1.2  2003/01/19 02:05:42  joko
47    ##    - removed pod-documentation: now in Data/Transfer/Sync.pod
48    ##
49  ##    Revision 1.1  2003/01/19 01:23:04  joko  ##    Revision 1.1  2003/01/19 01:23:04  joko
50  ##    + new from Data/Transfer/Sync.pm  ##    + new from Data/Transfer/Sync.pm
51  ##  ##
# Line 41  Line 81 
81  ##    + minor cosmetics for logging  ##    + minor cosmetics for logging
82  ##  ##
83  ##    Revision 1.2  2002/12/01 04:43:25  joko  ##    Revision 1.2  2002/12/01 04:43:25  joko
84  ##    + mapping deatil entries may now be either an ARRAY or a HASH  ##    + mapping detail entries may now be either an ARRAY or a HASH
85  ##    + erase flag is used now (for export-operations)  ##    + erase flag is used now (for export-operations)
86  ##    + expressions to refer to values inside deep nested structures  ##    + expressions to refer to values inside deep nested structures
87  ##    - removed old mappingV2-code  ##    - removed old mappingV2-code
# Line 53  Line 93 
93  ##  ##
94  ##    Revision 1.1  2002/10/10 03:44:21  cvsjoko  ##    Revision 1.1  2002/10/10 03:44:21  cvsjoko
95  ##    + new  ##    + new
96  ##    ----------------------------------------------------------------------------------------  ## -------------------------------------------------------------------------
97    
98    
99  package Data::Transfer::Sync::Core;  package Data::Transfer::Sync::Core;
# Line 68  use mixin::with qw( Data::Transfer::Sync Line 108  use mixin::with qw( Data::Transfer::Sync
108    
109  use Data::Dumper;  use Data::Dumper;
110    
111  use misc::HashExt;  #use misc::HashExt;
112  use libp qw( md5_base64 );  use Hash::Serializer;
113  use libdb qw( quotesql hash2Sql );  use Data::Mungle::Compare::Struct qw( getDifference isEmpty );
114  use Data::Transform::Deep qw( hash2object refexpr2perlref );  use Data::Mungle::Transform::Deep qw( deep_copy expand );
 use Data::Compare::Struct qw( getDifference isEmpty );  
115  use Data::Storage::Container;  use Data::Storage::Container;
116  use DesignPattern::Object;  use DesignPattern::Object;
117    use shortcuts::database qw( quotesql );
118    
119  # get logger instance  # get logger instance
120  my $logger = Log::Dispatch::Config->instance;  my $logger = Log::Dispatch::Config->instance;
# Line 105  sub _init { Line 145  sub _init {
145      $self->{container}->addStorage($_, $self->{storages}->{$_});      $self->{container}->addStorage($_, $self->{storages}->{$_});
146    }    }
147        
148      # trace
149        #print Dumper($self);
150        #exit;
151    
152    return 1;    return 1;
153        
154  }  }
155    
156  sub _initV1 {  sub _initV1 {
157    my $self = shift;    my $self = shift;
158      $logger->debug( __PACKAGE__ . "->_initV1" );
159      die("this should not be reached!");
160    # tag storages with id-authority and checksum-provider information    # tag storages with id-authority and checksum-provider information
161    # TODO: better store tag inside metadata to hold bits together!    # TODO: better store tag inside metadata to hold bits together!
162    map { $self->{container}->{storage}->{$_}->{isIdentAuthority} = 1 } @{$self->{id_authorities}};    map { $self->{container}->{storage}->{$_}->{isIdentAuthority} = 1 } @{$self->{id_authorities}};
# Line 119  sub _initV1 { Line 165  sub _initV1 {
165  }  }
166    
167    
 sub _preCheckOptions {  
   
   my $self = shift;  
   my $opts = shift;  
   
 #print Dumper($opts);  
 #exit;  
   
   # the type of the to-be-synced item  
   if (!$opts->{source}->{nodeType}) {  
     $logger->error( __PACKAGE__ . "->_preCheckOptions failed: Please specify \"source-type\".");  
     return;  
   }  
   # the name of the (container-) node the items are listed in  
   if (!$opts->{source}->{nodeName}) {  
     $logger->error( __PACKAGE__ . "->_preCheckOptions failed: Please specify \"source-node\".");  
     return;  
   }  
   
   # a "map"-declaration which module to use for mapping- and/or lookup-purposes  
   if (!$opts->{map}) {  
     $logger->warning( __PACKAGE__ . "->_preCheckOptions: No mapping supplied - please check key 'map|mappings' in global configuration or specify additional argument '--mapping-module'.");  
     return;  
   }  
   if (!$opts->{map}->{moduleName}) {  
     $logger->warning( __PACKAGE__ . "->_preCheckOptions: Currently only perl-modules can provide mappings: Please specify one with '--mapping-module'.");  
     return;  
   }  
   
   return 1;  
   
 }  
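
For illustration, an options hash that would satisfy these pre-checks might look like the following. This is a minimal sketch; only the key names are taken from the checks above, while the concrete values (and the mapping module name, borrowed from the POD error examples further down) are hypothetical.

    # minimal sketch -- keys mirror what _preCheckOptions looks for
    my $opts = {
        source => {
            nodeType => 'Currency',          # type of the to-be-synced item
            nodeName => 'currencies.csv',    # (container-) node the items are listed in
        },
        map => {
            moduleName => 'BizWorks::ResourceMapping',   # perl module providing the mapping
        },
    };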
   
   
   
   
   
   
   
   
   
 sub _buildFieldmappingV1 {  
   my $self = shift;  
   
   # build mapping  
  #   incoming: an Array of node map entries (Array or Hash) - e.g.  
   #   [ 'source:item_name' => 'target:class_val' ]  
   #   { source => 'event->startDateTime', target => 'begindate' }  
   foreach (@{$self->{args}->{mapping}}) {  
     if (ref $_ eq 'ARRAY') {  
       my @entry1 = split(':', $_->[0]);  
       my @entry2 = split(':', $_->[1]);  
       my $descent = [];  
       my $node = [];  
       $descent->[0] = $entry1[0];  
       $descent->[1] = $entry2[0];  
       $node->[0] = $entry1[1];  
       $node->[1] = $entry2[1];  
       push @{$self->{meta}->{$descent->[0]}->{childnodes}}, $node->[0];  
       push @{$self->{meta}->{$descent->[1]}->{childnodes}}, $node->[1];  
     } elsif (ref $_ eq 'HASH') {  
       foreach my $entry_key (keys %$_) {  
         my $entry_val = $_->{$entry_key};  
         push @{$self->{meta}->{$entry_key}->{childnodes}}, $entry_val;  
       }  
     }  
   
   }  
   
 }  
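
To make the two accepted entry styles concrete, here is a worked trace of how they decompose; the node names are taken from the comment above, and the resulting structure follows the push statements in this sub.

    # ARRAY style: [ 'source:item_name' => 'target:class_val' ]
    #   each element is split on ':' into (descent, node), giving
    #   $self->{meta}->{source}->{childnodes} = [ 'item_name' ]
    #   $self->{meta}->{target}->{childnodes} = [ 'class_val' ]
    #
    # HASH style: { source => 'event->startDateTime', target => 'begindate' }
    #   the keys are used as descents directly, giving
    #   $self->{meta}->{source}->{childnodes} = [ 'event->startDateTime' ]
    #   $self->{meta}->{target}->{childnodes} = [ 'begindate' ]

Either way the source and target childnode lists stay index-aligned, which is what _buildMap later relies on when it pairs them up by $mapidx.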
   
 sub _buildMetadataV1 {  
   my $self = shift;  
   
   # decompose identifiers for each partner  
   # TODO: refactor!!! take this list from already established/given metadata  
   foreach ('source', 'target') {  
       
     # get/set metadata for further processing  
   
     # Partner and Node (e.g.: "L:Country" or "R:countries.csv")  
     if (my $item = $self->{args}->{$_}) {  
       my @item = split(':', $item);  
       $self->{meta}->{$_}->{dbkey} = $item[0];  
       $self->{meta}->{$_}->{node} = $item[1];  
     }  
       
     # Filter  
     if (my $item_filter = $self->{args}->{$_ . '_filter'}) {  
       $self->{meta}->{$_}->{filter} = $item_filter;  
     }  
   
     # IdentProvider  
     if (my $item_ident = $self->{args}->{$_ . '_ident'}) {  
       my @item_ident = split(':', $item_ident);  
       $self->{meta}->{$_}->{IdentProvider} = { method => $item_ident[0], arg => $item_ident[1] };  
     }  
   
 #print Dumper($self->{meta});  
   
     # TODO: ChecksumProvider  
   
     # exclude properties/subnodes  
     if (my $item_exclude = $self->{args}->{$_ . '_exclude'}) {  
       $self->{meta}->{$_}->{subnodes_exclude} = $item_exclude;  
     }  
       
     # TypeProvider  
     if (my $item_type = $self->{args}->{$_ . '_type'}) {  
       my @item_type = split(':', $item_type);  
       $self->{meta}->{$_}->{TypeProvider} = { method => $item_type[0], arg => $item_type[1] };  
     }  
       
     # Callbacks - writers (will be triggered _before_ writing to target)  
     if (my $item_writers = $self->{args}->{$_ . '_callbacks_write'}) {  
       my $descent = $_;     # this is important since the following code inside the map wants to use its own context variables  
       map { $self->{meta}->{$descent}->{Callback}->{write}->{$_}++; } @$item_writers;  
     }  
       
     # Callbacks - readers (will be triggered _after_ reading from source)  
     if (my $item_readers = $self->{args}->{$_ . '_callbacks_read'}) {  
       my $descent = $_;  
       map { $self->{meta}->{$descent}->{Callback}->{read}->{$_}++; } @$item_readers;  
     }  
       
     # resolve storage objects  
     #$self->{$_} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};  
     # relink references to metainfo  
     $self->{meta}->{$_}->{storage} = $self->{container}->{storage}->{$self->{meta}->{$_}->{dbkey}};  
     #print "iiiiisprov: ", Dumper($self->{meta}->{$_}->{storage}), "\n";  
   }  
   
 }  
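
The callbacks registered here are later invoked by name in the sync loop: a function name is built from the node (or nodeType) plus the property and a "_read" suffix, and eval'd with a hash of object/property/value/storage. A matching reader might therefore look like the following sketch; the package and property names are hypothetical.

    # sketch of a "new style" read callback; would be invoked as Currency::key_read(...)
    package Currency;

    sub key_read {
        my $args = shift;   # { object => ..., property => ..., value => ..., storage => ... }
        # derive the value the sync engine should see for this property
        return uc $args->{object}->{ $args->{property} };
    }

    1;

Writers follow the same convention with a "_write" suffix and are triggered before writing to the target.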
   
168  # TODO: abstract the hardwired use of "source" and "target" in here somehow - hmmmm.......  /(="§/%???  # TODO: abstract the hardwired use of "source" and "target" in here somehow - hmmmm.......  /(="§/%???
169  sub _syncNodes {  sub _run {
170    
171    my $self = shift;    my $self = shift;
172        
173    my $tc = OneLineDumpHash->new( {} );    #print "verbose: ", $self->{verbose}, "\n";
174      $self->{verbose} = 1;
175      
176      $logger->debug( __PACKAGE__ . "->_run" );
177    
178      # for statistics
179      my $tc = Hash::Serializer->new( {} );
180    my $results;    my $results;
181        
182    # set of objects is already in $self->{args}    # set of objects is already in $self->{args}
# Line 273  sub _syncNodes { Line 191  sub _syncNodes {
191    }    }
192        
193    # get reference to node list from convenient method provided by CORE-HANDLE    # get reference to node list from convenient method provided by CORE-HANDLE
   #$results ||= $self->{source}->getListUnfiltered($self->{meta}->{source}->{node});  
   #$results ||= $self->{meta}->{source}->{storage}->getListUnfiltered($self->{meta}->{source}->{node});  
194    $results ||= $self->_getNodeList('source');    $results ||= $self->_getNodeList('source');
195    
196    # checkpoint: do we actually have a list to iterate through?    # checkpoint: do we actually have a list to iterate through?
197    if (!$results || !@{$results}) {    if (!$results || !@{$results}) {
198      $logger->notice( __PACKAGE__ . "->syncNodes: No nodes to synchronize." );      $logger->notice( __PACKAGE__ . "->_run: No nodes to synchronize." );
199        return;
200      }
201    
202      #print Dumper(@$results);
203      #exit;
204    
205      # check if we actually *have* a synchronization method
206      if (!$self->{options}->{metadata}->{syncMethod}) {
207        $logger->critical( __PACKAGE__ . "->_run: No synchronization method (checksum|timestamp) specified" );
208      return;      return;
209    }    }
210        
211        
212    # dereference    # dereference
213    my @results = @{$results};    my @results = @{$results};
214      #print Dumper(@results);
215    
216    # iterate through set    # iterate through set
217    foreach my $source_node_real (@results) {    foreach my $source_node_real (@results) {
218    
219        print ":" if $self->{verbose};
220    
221        #print Dumper($source_node_real);
222    
223      $tc->{total}++;      $tc->{total}++;
224    
225  #print "========================  iter", "\n";      #print "=" x 80, "\n";
226    
227      # clone object (in case we have to modify it here)      # clone object (in case we have to modify it here)
228      # TODO:      # TODO:
229      #   - is a "deep_copy" needed here if occurring modifications take place?      #   - is a "deep_copy" needed here if occurring modifications take place?
230      #   - puuhhhh, i guess a deep_copy would destroy tangram mechanisms?      #   - puuhhhh, i guess a deep_copy would destroy tangram mechanisms?
231      #   - after all, just take care for now that this object doesn't get updated!      #   - after all, just take care for now that this object doesn't get updated!
232        #   - so, just use its reference for now - if some cloning is needed in future, do this here!
233      my $source_node = $source_node_real;      my $source_node = $source_node_real;
234        #my $source_node = expand($source_node_real);
235    
236      # modify entry - handle new style callbacks (the readers)      # modify entry - handle new style callbacks (the readers)
237  #print Dumper($source_node);  
238  #exit;      # trace
239        #print Dumper($source_node);
240        #exit;
241    
242      my $descent = 'source';      my $descent = 'source';
243    
# Line 310  sub _syncNodes { Line 245  sub _syncNodes {
245      my $map_callbacks = {};      my $map_callbacks = {};
246      if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {      if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {
247    
248          # trace
249            #print Dumper($callbacks);
250            #exit;
251    
252        my $error = 0;        my $error = 0;
253    
254        foreach my $node (keys %{$callbacks->{read}}) {        foreach my $node (keys %{$callbacks->{read}}) {
255                    
256            #print "cb_node: $node", "\n";
257            
258          my $object = $source_node;          my $object = $source_node;
259          my $value; # = $source_node->{$node};          my $value; # = $source_node->{$node};
260    
261          # trace
262            #print Dumper($self->{options});
263    
264          # ------------  half-redundant: make $self->callCallback($object, $value, $opts)          # ------------  half-redundant: make $self->callCallback($object, $value, $opts)
265          my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_read';          #my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_read';
266            my $perl_callback = $self->{meta}->{$descent}->{nodeType} . '::' . $node . '_read';
267          my $evalstring = 'return ' . $perl_callback . '( { object => $object, property => $node, value => $value, storage => $self->{meta}->{$descent}->{storage} } );';          my $evalstring = 'return ' . $perl_callback . '( { object => $object, property => $node, value => $value, storage => $self->{meta}->{$descent}->{storage} } );';
268          #print $evalstring, "\n"; exit;          #print $evalstring, "\n"; exit;
269          my $cb_result = eval($evalstring);          my $cb_result = eval($evalstring);
270          if ($@) {          if ($@) {
           die $@;  
271            $error = 1;            $error = 1;
272            print $@, "\n";            $logger->error( __PACKAGE__ . "->_run: $@" );
273              next;
274          }          }
275          # ------------  half-redundant: make $self->callCallback($object, $value, $opts)          # ------------  half-redundant: make $self->callCallback($object, $value, $opts)
276                    
# Line 335  sub _syncNodes { Line 280  sub _syncNodes {
280    
281      }      }
282    
283  #print Dumper($source_node);      # trace
284          #print Dumper($source_node);
285    
286      # exclude defined fields  (simply delete from object)      # exclude defined fields  (simply delete from object)
287      map { delete $source_node->{$_} } @{$self->{meta}->{source}->{subnodes_exclude}};      map { delete $source_node->{$_} } @{$self->{meta}->{source}->{subnodes_exclude}};
# Line 344  sub _syncNodes { Line 290  sub _syncNodes {
290      $self->{node} = {};      $self->{node} = {};
291      $self->{node}->{source}->{payload} = $source_node;      $self->{node}->{source}->{payload} = $source_node;
292    
293  #print "res - ident", "\n";      # trace
294          #print Dumper($self->{node});
295          #exit;
296    
297      # determine ident of entry      # determine ident of entry
298      my $identOK = $self->_resolveNodeIdent('source');      my $identOK = $self->_resolveNodeIdent('source');
299      #if (!$identOK && lc $self->{args}->{direction} ne 'import') {      #if (!$identOK && lc $self->{args}->{direction} ne 'import') {
300      if (!$identOK) {      if (!$identOK) {
301        #print Dumper($self->{meta}->{source});        #print Dumper($self->{meta}->{source});
302        $logger->critical( __PACKAGE__ . "->syncNodes: No ident found in source node \"$self->{meta}->{source}->{node}\", try to \"prepare\" this node first?" );        $logger->critical( __PACKAGE__ . "->_run: No ident found in source node ( nodeName='$self->{meta}->{source}->{nodeName}', nodeType='$self->{meta}->{source}->{nodeType}') try to \"prepare\" this node first?" );
303        return;        return;
304      }      }
305    
306  #print "statload", "\n";      #print "l" if $self->{verbose};
 #print "ident: ", $self->{node}->{source}->{ident}, "\n";  
 #print Dumper($self->{node});  
       
307      my $statOK = $self->_statloadNode('target', $self->{node}->{source}->{ident});      my $statOK = $self->_statloadNode('target', $self->{node}->{source}->{ident});
308    
 #print Dumper($self->{node});  
       
309      # mark node as new either if there's no ident or if stat/load failed      # mark node as new either if there's no ident or if stat/load failed
310      if (!$statOK) {      if (!$statOK) {
311        $self->{node}->{status}->{new} = 1;        $self->{node}->{status}->{new} = 1;
312        print "n" if $self->{verbose};        print "n" if $self->{verbose};
313      }      }
314    
 #print "checksum", "\n";  
       
315      # determine status of entry by synchronization method      # determine status of entry by synchronization method
316      if ( (lc $self->{args}->{method} eq 'checksum') ) {      if ( lc $self->{options}->{metadata}->{syncMethod} eq 'checksum' ) {
317      #if ( $statOK && (lc $self->{args}->{method} eq 'checksum') ) {      #if ( $statOK && (lc $self->{args}->{method} eq 'checksum') ) {
318      #if ( !$self->{node}->{status}->{new} && (lc $self->{args}->{method} eq 'checksum') ) {      #if ( !$self->{node}->{status}->{new} && (lc $self->{args}->{method} eq 'checksum') ) {
319                
320        # TODO:      # calculate local checksum of source node
321        # is this really worth a "critical"???      $self->handleLocalChecksum('source');
322        # no - it should just be a debug appendix i believe      
323        # calculate checksum of source node
 #print "readcs", "\n";  
         
       # calculate checksum of source node  
324        #$self->_calcChecksum('source');        #$self->_calcChecksum('source');
325        if (!$self->_readChecksum('source')) {        if (!$self->readChecksum('source')) {
326          $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"source\" entry with ident=\"$self->{node}->{source}->{ident}\"" );          $logger->warning( __PACKAGE__ . "->_run: Could not find \"source\" entry with ident=\"$self->{node}->{source}->{ident}\"" );
327          $tc->{skip}++;          $tc->{skip}++;
328          print "s" if $self->{verbose};          print "s" if $self->{verbose};
329          next;          next;
330        }        }
331                
332        # get checksum from synchronization target        # get checksum from synchronization target
333        $self->_readChecksum('target');        $self->readChecksum('target');
334        #if (!$self->_readChecksum('target')) {        #if (!$self->readChecksum('target')) {
335        #  $logger->critical( __PACKAGE__ . "->_readChecksum: Could not find \"target\" entry with ident=\"$self->{node}->{source}->{ident}\"" );        #  $logger->critical( __PACKAGE__ . "->readChecksum: Could not find \"target\" entry with ident=\"$self->{node}->{source}->{ident}\"" );
336        #  next;        #  next;
337        #}        #}
338        
# Line 407  sub _syncNodes { Line 345  sub _syncNodes {
345        # determine if entry is "new" or "dirty"        # determine if entry is "new" or "dirty"
346        # after all, this seems to be the point where the hammer falls.....        # after all, this seems to be the point where the hammer falls.....
347        print "c" if $self->{verbose};        print "c" if $self->{verbose};
348    
349          # trace
350            #print Dumper($self->{node});
351            #exit;
352            #print "LOCAL: ", $self->{node}->{source}->{checksum_local_storage}, " <-> ", $self->{node}->{source}->{checksum_local_calculated}, "\n";
353            #print "REMOTE: ", $self->{node}->{source}->{checksum}, " <-> ", $self->{node}->{target}->{checksum}, "\n";
354    
355          # calculate new/dirty status
356        $self->{node}->{status}->{new} = !$self->{node}->{target}->{checksum};        $self->{node}->{status}->{new} = !$self->{node}->{target}->{checksum};
357        if (!$self->{node}->{status}->{new}) {        if (!$self->{node}->{status}->{new}) {
358          $self->{node}->{status}->{dirty} =          $self->{node}->{status}->{dirty} =
# Line 416  sub _syncNodes { Line 362  sub _syncNodes {
362            $self->{args}->{force};            $self->{args}->{force};
363        }        }
364    
365          # new 2004-06-17: also check if local checksum is inconsistent
366          if ($self->{node}->{source}->{checksum_local_storage} ne $self->{node}->{source}->{checksum_local_calculated}) {
367            $self->{node}->{status}->{dirty_local} = 1;
368            $self->{node}->{status}->{dirty} = 1;
369          }
370    
371        } else {
372          $logger->warning( __PACKAGE__ . "->_run: Synchronization method '$self->{options}->{metadata}->{syncMethod}' is not implemented" );
373          $tc->{skip}++;
374          print "s" if $self->{verbose};
375          next;
376        }
377    
378        # new 2004-06-17: also update local checksum
379        if ($self->{node}->{status}->{dirty_local}) {
380          $tc->{locally_modified}++;
381          print "[lm]" if $self->{verbose};
382          $self->_doModify_LocalChecksum('source');
383      }      }
384    
385      # first reaction on entry-status: continue with next entry if the current is already "in sync"      # first reaction on entry-status: continue with next entry if the current is already "in sync"
# Line 424  sub _syncNodes { Line 388  sub _syncNodes {
388        next;        next;
389      }      }
390    
391      # build map to actually transfer the data from source to target      #print Dumper($self->{node}->{source});
     $self->_buildMap();  
   
392    
393  #print Dumper($self->{node}); exit;      # build map to actually transfer the data from source to target
394        if (!$self->buildAttributeMap()) {
395          #$logger->warning( __PACKAGE__ . "->_run: Attribute Map could not be created. Will not insert or modify node.");
396          push( @{$tc->{error_per_row}}, "Attribute Map could not be created. Will not insert or modify node $self->{node}->{source}->{ident}.");
397          #push( @{$tc->{error_per_row}}, "Attribute Map could not be created. Will not insert or modify node " . Dumper($self->{node}->{source}) . ".");
398          $tc->{error}++;
399          print "e" if $self->{verbose};
400          next;
401        }
402    
403  #print "attempt", "\n";      # trace
404          #print Dumper($self->{node}); exit;
405          #print "attempt", "\n";
406    
407      # additional (new) checks for feature "write-protection"      # additional (new) checks for feature "write-protection"
408      if ($self->{meta}->{target}->{storage}->{isWriteProtected}) {      if ($self->{meta}->{target}->{storage}->{isWriteProtected}) {
409        $tc->{attempt_transfer}++;        $tc->{attempt_transfer}++;
410        print "\n" if $self->{verbose};        print "\n" if $self->{verbose};
411        $logger->notice( __PACKAGE__ . "->syncNodes: Target is write-protected. Will not insert or modify node. " .        $logger->notice( __PACKAGE__ . "->_run: Target is write-protected. Will not insert or modify node. " .
412            "(Ident: $self->{node}->{source}->{ident} " . "Dump:\n" . Dumper($self->{node}->{source}->{payload}) . ")" );            "(Ident: $self->{node}->{source}->{ident} " . "Dump:\n" . Dumper($self->{node}->{source}->{payload}) . ")" );
413        print "\n" if $self->{verbose};        print "\n" if $self->{verbose};
414        $tc->{skip}++;        $tc->{skip}++;
415        next;        next;
416      }      }
417    
418        # trace
419          #print Dumper($self);
420          #exit;
421    
422      # transfer contents of map to target      # transfer contents of map to target
423      if ($self->{node}->{status}->{new}) {      if ($self->{node}->{status}->{new}) {
424        $tc->{attempt_new}++;        $tc->{attempt_new}++;
# Line 450  sub _syncNodes { Line 426  sub _syncNodes {
426        # asymmetry: refetch node from target to re-calculate new ident and checksum (TODO: is IdentAuthority of relevance here?)        # asymmetry: refetch node from target to re-calculate new ident and checksum (TODO: is IdentAuthority of relevance here?)
427        #print Dumper($self->{node});        #print Dumper($self->{node});
428        $self->_statloadNode('target', $self->{node}->{target}->{ident}, 1);        $self->_statloadNode('target', $self->{node}->{target}->{ident}, 1);
429        $self->_readChecksum('target');        $self->readChecksum('target');
430    
431      } elsif ($self->{node}->{status}->{dirty}) {      } elsif ($self->{node}->{status}->{dirty}) {
432        $tc->{attempt_modify}++;        $tc->{attempt_modify}++;
433        # asymmetry: get ident before updating (TODO: is IdentAuthority of relevance here?)        # asymmetry: get ident before updating (TODO: is IdentAuthority of relevance here?)
434        $self->{node}->{target}->{ident} = $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}};        $self->{node}->{target}->{ident} = $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}};
435        $self->_doTransferToTarget('update');        $self->_doTransferToTarget('update');
436        $self->_readChecksum('target');        $self->readChecksum('target');
437      }      }
438    
439      if ($self->{node}->{status}->{ok}) {      if ($self->{node}->{status}->{ok}) {
# Line 471  sub _syncNodes { Line 447  sub _syncNodes {
447        print "e" if $self->{verbose};        print "e" if $self->{verbose};
448      }      }
449            
450        # trace
451          #print Dumper($self);
452          #exit;
453        
454      # change ident in source (take from target), if transfer was ok and target is an IdentAuthority      # change ident in source (take from target), if transfer was ok and target is an IdentAuthority
455      # this is (for now) called a "retransmit" indicated by a "r"-character when verbosing      # this is (for now) called a "retransmit" indicated by a "r"-character when verbosing
456      if ($self->{node}->{status}->{ok} && $self->{meta}->{target}->{storage}->{isIdentAuthority}) {      #if ($self->{node}->{status}->{ok} && $self->{options}->{target}->{storage}->{idAuthority}) {
457        if ($self->{node}->{status}->{ok} && $self->{meta}->{target}->{isIdentAuthority}) {
458        print "r" if $self->{verbose};        print "r" if $self->{verbose};
459        #print Dumper($self->{meta});        #print Dumper($self->{meta});
460        #print Dumper($self->{node});        #print Dumper($self->{node});
461        #exit;        #exit;
462        $self->_doModifySource_IdentChecksum($self->{node}->{target}->{ident});        $self->_doModifySource_IdentChecksum($self->{node}->{target}->{ident});
463      }      }
464        
465      print ":" if $self->{verbose};      #print "UNLOAD", "\n";
466        #$self->{meta}->{source}->{storage}->unload( $self->{node}->{source}->{payload} );
467    
468    }    }
469    
# Line 498  sub _syncNodes { Line 480  sub _syncNodes {
480            
481      # todo!!!      # todo!!!
482      #sysevent( { usermsg => $msg, level => $level }, $taskEvent );      #sysevent( { usermsg => $msg, level => $level }, $taskEvent );
483      $logger->info( __PACKAGE__ . "->syncNodes: $msg" );      #$logger->info( __PACKAGE__ . "->_run: $msg" );
484        $logger->info($msg . "\n");
485    
486    return $tc;    return $tc;
487    
488  }  }
489    
490    
491  # refactor this as some core-function to do a generic dump resolving data-encapsulations of e.g. Set::Object  sub _doTransferToTarget {
 sub _dumpCompact {  
   my $self = shift;  
   
   #my $vars = \@_;  
   my @data = ();  
   
   my $count = 0;  
   foreach (@_) {  
     my $item = {};  
     foreach my $key (keys %$_) {  
       my $val = $_->{$key};  
   
 #print Dumper($val);  
   
       if (ref $val eq 'Set::Object') {  
         #print "========================= SET", "\n";  
 #print Dumper($val);  
         #print Dumper($val->members());  
         #$val = $val->members();  
         #$vars->[$count]->{$key} = $val->members() if $val->can("members");  
         #$item->{$key} = $val->members() if $val->can("members");  
         $item->{$key} = $val->members();  
         #print Dumper($vars->[$count]->{$key});  
   
       } else {  
         $item->{$key} = $val;  
       }  
   
     }  
     push @data, $item;  
     $count++;  
   }  
   
 #print "Dump:", Dumper(@data), "\n";  
   
   $Data::Dumper::Indent = 0;  
   my $result = Dumper(@data);  
   $Data::Dumper::Indent = 2;  
   return $result;  
     
 }  
   
   
 sub _calcChecksum {  
   
   my $self = shift;  
   my $descent = shift;  
   my $specifier = shift;  
   
   # calculate checksum for current object  
     my $ident = $self->{node}->{$descent}->{ident};  
     
   # build dump of this node  
     my $payload = $self->{node}->{$descent}->{payload};  
     #my $dump = $ident . "\n" . $item->quickdump();  
     #my $dump = $ident . "\n" . Dumper($item);  
     my $dump = $ident . "\n" . $self->_dumpCompact($payload);  
     
   # TODO: $logger->dump( ... );  
     #$logger->debug( __PACKAGE__ . ": " . $dump );  
     #$logger->dump( __PACKAGE__ . ": " . $dump );  
     
   # calculate checksum from dump  
     # note: the 32-bit integer hash from DBI seems  
     # to generate duplicates with small payloads already in ranges of hundreds of items/rows!!!  
    # try to avoid using it, or use it only for payloads greater than, hmmm, let's say 30 chars?  
     # (we had about 15 chars average per item (row))  
   
     # md5-based fingerprint, base64 encoded (from Digest::MD5)  
       $self->{node}->{$descent}->{checksum} = md5_base64($dump) . '==';  
     # 32-bit integer "hash" value (maybe faster?) (from DBI)  
       #$self->{node}->{$descent}->{checksum} = DBI::hash($dump, 1);  
   
   # signal good  
   return 1;  
   
 }  
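
The 1.12 log says this checksum code moved to ::Compare::Checksum; as a standalone illustration of the fingerprinting idea (ident plus a compact one-line dump, md5'd and base64-encoded), a stripped-down sketch without the Set::Object unwrapping done by _dumpCompact could read:

    use strict;
    use warnings;
    use Digest::MD5 qw( md5_base64 );
    use Data::Dumper;

    sub calc_checksum_sketch {
        my ($ident, $payload) = @_;
        local $Data::Dumper::Indent = 0;              # compact, single-line dump
        my $dump = $ident . "\n" . Dumper($payload);
        # md5_base64() omits the trailing padding, which the code above re-appends
        return md5_base64($dump) . '==';
    }

    print calc_checksum_sketch('42', { key => 'AUD', text => 'Australian Dollar' }), "\n";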
   
   
 sub _readChecksum {  
   my $self = shift;  
   
   my $descent = shift;  
   
   #print "getcheck:", "\n"; print Dumper($self->{node}->{$descent});  
     
   if (!$self->{node}->{$descent}) {  
     # signal checksum bad  
     return;  
   }  
   
   # get checksum for current entry  
   # TODO: don't have the checksum column/property hardcoded as "cs" here, make this configurable somehow  
   
   if ($self->{meta}->{$descent}->{storage}->{isChecksumAuthority}) {  
     #$self->{node}->{$descent}->{checksum} = $entry->{cs};  
     #$self->{node}->{$descent}->{checksum} = $self->_calcChecksum($descent); # $entry->{cs};  
     #print "descent: $descent", "\n";  
     $self->_calcChecksum($descent);  
     #print "checksum: ", $self->{node}->{$descent}->{checksum}, "\n";  
   } else {  
   
     #$self->{node}->{$descent}->{checksum} = $entry->{cs};  
     $self->{node}->{$descent}->{checksum} = $self->{node}->{$descent}->{payload}->{cs};  
   }  
   
   # signal checksum good  
   return 1;  
   
 }  
   
   
 sub _buildMap {  
   
  my $self = shift;  
   
   # field-structure for building sql  
   # mapping of sql-fieldnames to object-attributes  
     $self->{node}->{map} = {};  
   
     # manually set ...  
       # ... object-id  
       $self->{node}->{map}->{$self->{meta}->{target}->{IdentProvider}->{arg}} = $self->{node}->{source}->{ident};  
       # ... checksum  
       $self->{node}->{map}->{cs} = $self->{node}->{source}->{checksum};  
   
 #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";  
   
     # for transferring flat structures via simple (1:1) mapping  
     # TODO: diff per property / property value  
   
     if ($self->{args}->{mapping}) {  
       # apply mapping from $self->{args}->{mapping} to $self->{node}->{map}  
       #foreach my $key (@{$self->{meta}->{source}->{childnodes}}) {  
       my @childnodes = @{$self->{meta}->{source}->{childnodes}};  
       for (my $mapidx = 0; $mapidx <= $#childnodes; $mapidx++) {  
         #my $map_right = $self->{args}->{mapping}->{$key};  
           
         $self->{node}->{source}->{propcache} = {};  
         $self->{node}->{target}->{propcache} = {};  
           
         # get property name  
         $self->{node}->{source}->{propcache}->{property} = $self->{meta}->{source}->{childnodes}->[$mapidx];  
         $self->{node}->{target}->{propcache}->{property} = $self->{meta}->{target}->{childnodes}->[$mapidx];  
         #print "map: $map_right", "\n";  
   
         # get property value  
         my $value;  
           
         # detect for callback - old style - (maybe the better???)  
         if (ref($self->{node}->{target}->{map}) eq 'CODE') {  
           #$value = &$map_right($objClone);  
         } else {  
           # plain (scalar?) value  
           #$value = $objClone->{$map_right};  
           $self->{node}->{source}->{propcache}->{value} = $self->{node}->{source}->{payload}->{$self->{node}->{source}->{propcache}->{property}};  
         }  
         #$self->{node}->{map}->{$key} = $value;  
           
         # detect expression  
         # for transferring deeply nested structures described by expressions  
         #print "val: $self->{node}->{source}->{propcache}->{value}", "\n";  
         if ($self->{node}->{source}->{propcache}->{property} =~ s/^expr://) {  
             
           # create an anonymous sub to act as callback target dispatcher  
             my $cb_dispatcher = sub {  
               #print "===============  CALLBACK DISPATCHER", "\n";  
               #print "ident: ", $self->{node}->{source}->{ident}, "\n";  
               #return $self->{node}->{source}->{ident};  
                 
             };  
             
   
 #print Dumper($self->{node});  
             
           # build callback map for helper function  
           #my $cbmap = { $self->{meta}->{source}->{IdentProvider}->{arg} => $cb_dispatcher };  
           my $cbmap = {};  
           my $value = refexpr2perlref($self->{node}->{source}->{payload}, $self->{node}->{source}->{propcache}->{property}, $cbmap);  
           $self->{node}->{source}->{propcache}->{value} = $value;  
         }  
   
         # encode values dependent on type of underlying storage here - expand cases...  
         my $storage_type = $self->{meta}->{target}->{storage}->{locator}->{type};  
         if ($storage_type eq 'DBI') {  
           # ...for sql  
           $self->{node}->{source}->{propcache}->{value} = quotesql($self->{node}->{source}->{propcache}->{value});  
         }  
          elsif ($storage_type eq 'Tangram') {  
           # iso? utf8 already possible?  
           
         } elsif ($storage_type eq 'LDAP') {  
           # TODO: encode utf8 here?  
         }  
   
         # store value to transfer map  
         $self->{node}->{map}->{$self->{node}->{target}->{propcache}->{property}} = $self->{node}->{source}->{propcache}->{value};  
   
       }  
     }  
   
       
   # TODO: $logger->dump( ... );  
   #$logger->debug( "sqlmap:" . "\n" . Dumper($self->{node}->{map}) );  
 #print "sqlmap: ", Dumper($self->{node}->{map}), "\n";  
 #print "entrystatus: ", Dumper($self->{node}), "\n";  
   
 }  
   
 sub _resolveNodeIdent {  
   my $self = shift;  
   my $descent = shift;  
     
   #print Dumper($self->{node}->{$descent});  
     
   # get to the payload  
     #my $item = $specifier->{item};  
     my $payload = $self->{node}->{$descent}->{payload};  
   
   # resolve method to get to the id of the given item  
   # we use global metadata and the given descent for this task  
     #my $ident = $self->{$descent}->id($item);  
     #my $ident = $self->{meta}->{$descent}->{storage}->id($item);  
   
     my $ident;  
     my $provider_method = $self->{meta}->{$descent}->{IdentProvider}->{method};  
     my $provider_arg = $self->{meta}->{$descent}->{IdentProvider}->{arg};  
   
     # resolve to ident  
     if ($provider_method eq 'property') {  
       $ident = $payload->{$provider_arg};  
   
     } elsif ($provider_method eq 'storage_method') {  
       #$ident = $self->{meta}->{$descent}->{storage}->id($item);  
       $ident = $self->{meta}->{$descent}->{storage}->$provider_arg($payload);  
     }  
       
     $self->{node}->{$descent}->{ident} = $ident;  
       
   return 1 if $ident;  
   
 }  
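
A short trace of the two provider methods handled here, using an args-style declaration as it is split up in _buildMetadataV1 (the concrete column and method names are only examples):

    # method 'property': the ident is read straight from a payload field
    #   $self->{args}->{source_ident} = 'property:oid'
    #     => IdentProvider = { method => 'property', arg => 'oid' }
    #     => $ident = $payload->{oid};
    #
    # method 'storage_method': the storage handle is asked for the ident
    #   $self->{args}->{source_ident} = 'storage_method:id'
    #     => IdentProvider = { method => 'storage_method', arg => 'id' }
    #     => $ident = $self->{meta}->{source}->{storage}->id($payload);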
   
   
 sub _modifyNode {  
492    my $self = shift;    my $self = shift;
   my $descent = shift;  
493    my $action = shift;    my $action = shift;
   my $map = shift;  
   my $crit = shift;  
   
   # map for new style callbacks  
   my $map_callbacks = {};  
   
   # checks go first!  
     
     # TODO: this should be reviewed first - before extending  ;-)  
     # TODO: this should be extended:  
    # count these cases inside the caller to this sub and provide a better overall message  
     # if this counts still zero in the end:  
     #     "No nodes have been touched for modify: Do you have column-headers in your csv file?"  
     if (not defined $self->{node}) {  
       #$logger->critical( __PACKAGE__ . "->_modifyNode failed: \"$descent\" node is empty." );  
       #return;  
     }  
   
   # transfer callback nodes from value map to callback map - handle them afterwards! - (new style callbacks)  
   if (my $callbacks = $self->{meta}->{$descent}->{Callback}) {  
     foreach my $callback (keys %{$callbacks->{write}}) {  
       $map_callbacks->{write}->{$callback} = $map->{$callback};  
       delete $map->{$callback};  
     }  
   }  
     
     
   #print Dumper($self->{meta});  
   
   # DBI speaks SQL  
   if ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'DBI') {  
   
 #print Dumper($self->{node});  
     my $sql_main;  
     # translate map to sql  
     #print $action, "\n"; exit;  
     #print $self->{meta}->{$descent}->{node}, "\n"; exit;  
     #print "action:";  
     #print $action, "\n";  
 #$action = "anc";  
 #print "yai", "\n";  
   
 #print Dumper($map);  
 #delete $map->{cs};  
   
     if (lc($action) eq 'insert') {  
       $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_INSERT');  
     } elsif (lc $action eq 'update') {  
       $crit ||= "$self->{meta}->{$descent}->{IdentProvider}->{arg}='$self->{node}->{$descent}->{ident}'";  
       $sql_main = hash2Sql($self->{meta}->{$descent}->{node}, $map, 'SQL_UPDATE', $crit);  
     }  
   
 #$sql_main = "UPDATE currencies_csv SET oid='abcdef' WHERE text='Australian Dollar' AND key='AUD';";  
 #$sql_main = "UPDATE currencies_csv SET oid='huhu2' WHERE ekey='AUD'";  
   
 #print "sql: ", $sql_main, "\n";  
 #exit;  
   
     # transfer data  
     my $sqlHandle = $self->{meta}->{$descent}->{storage}->sendCommand($sql_main);  
   
 #exit;  
   
     # handle errors  
     if ($sqlHandle->err) {  
       #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }  
       $self->{node}->{status}->{error} = {  
         statement => $sql_main,  
         state => $sqlHandle->state,  
         err => $sqlHandle->err,  
         errstr => $sqlHandle->errstr,  
       };  
     } else {  
       $self->{node}->{status}->{ok} = 1;  
     }  
   
   # Tangram does it the oo-way (naturally)  
   } elsif ($self->{meta}->{$descent}->{storage}->{locator}->{type} eq 'Tangram') {  
     my $sql_main;  
     my $object;  
   
     # determine classname  
     my $classname = $self->{meta}->{$descent}->{node};  
       
     # properties to exclude  
     my @exclude = @{$self->{meta}->{$descent}->{subnodes_exclude}};  
   
   
     if (my $identProvider = $self->{meta}->{$descent}->{IdentProvider}) {  
       push @exclude, $identProvider->{arg};  
     }  
   
     # new feature:  
     #     - check TypeProvider metadata property from other side  
     #     - use argument (arg) inside as a classname for object creation on this side  
     #my $otherSide = $self->_otherSide($descent);  
     if (my $typeProvider = $self->{meta}->{$descent}->{TypeProvider}) {  
       #print Dumper($map);  
       $classname = $map->{$typeProvider->{arg}};  
       # remove nodes from map also (push nodes to "subnodes_exclude" list)  
       push @exclude, $typeProvider->{arg};  
     }  
       
     # exclude banned properties (remove from map)  
     #map { delete $self->{node}->{map}->{$_} } @{$self->{args}->{exclude}};  
     map { delete $map->{$_} } @exclude;  
   
     # list of properties  
     my @props = keys %{$map};  
       
     # transfer data  
     if (lc $action eq 'insert') {  
   
       # build array to initialize object  
       #my @initarray = ();  
       #map { push @initarray, $_, undef; } @props;  
   
       # make the object persistent in four steps:  
       #   - raw create (perl / class tangram scope)  
       #   - engine insert (tangram scope)   ... this establishes inheritance - don't try to fill in inherited properties before!  
       #   - raw fill-in from hash (perl scope)  
       #   - engine update (tangram scope)  ... this updates all properties just filled in  
         
       # create new object ...  
       #my $object = $classname->new( @initarray );  
       $object = $classname->new();  
         
       # ... pass to orm ...  
       $self->{meta}->{$descent}->{storage}->insert($object);  
   
       # ... and initialize with empty (undef'd) properties.  
       #print Dumper(@props);  
       map { $object->{$_} = undef; } @props;  
   
       # mix in values ...  
       hash2object($object, $map);  
   
       # ... and re-update@orm.  
 #print Dumper($object);  
       $self->{meta}->{$descent}->{storage}->update($object);  
   
       # asymmetry: get ident after insert  
       # TODO:  
       #   - just do this if it is an IdentAuthority  
       #   - use IdentProvider metadata here  
 #print Dumper($self->{meta}->{$descent});  
       my $oid = $self->{meta}->{$descent}->{storage}->id($object);  
 #print "oid: $oid", "\n";  
       $self->{node}->{$descent}->{ident} = $oid;  
   
   
     } elsif (lc $action eq 'update') {  
         
       # get fresh object from orm first  
       $object = $self->{meta}->{$descent}->{storage}->load($self->{node}->{$descent}->{ident});  
   
 #print Dumper($self->{node});  
         
       # mix in values  
       #print Dumper($object);  
       hash2object($object, $map);  
       #print Dumper($object);  
       #exit;  
       $self->{meta}->{$descent}->{storage}->update($object);  
     }  
   
     my $error = 0;  
   
     # handle new style callbacks - this is a HACK - do this without an eval!  
     #print Dumper($map);  
     #print "cb: ", Dumper($self->{meta}->{$descent}->{Callback});  
     #print Dumper($map_callbacks);  
     foreach my $node (keys %{$map_callbacks->{write}}) {  
       #print Dumper($node);  
       my $perl_callback = $self->{meta}->{$descent}->{node} . '::' . $node . '_write';  
       my $evalstring = $perl_callback . '( { object => $object, value => $map_callbacks->{write}->{$node}, storage => $self->{meta}->{$descent}->{storage} } );';  
       #print $evalstring, "\n"; exit;  
       eval($evalstring);  
       if ($@) {  
         $error = 1;  
         print $@, "\n";  
       }  
         
       #print "after eval", "\n";  
         
       if (!$error) {  
         # re-update@orm  
         $self->{meta}->{$descent}->{storage}->update($object);  
       }  
     }  
     
     # handle errors  
     if ($error) {  
       #print "error", "\n";  
 =pod  
       my $sqlHandle;  
       #if ($self->{args}->{debug}) { print "sql-error with statement: $sql_main", "\n"; }  
       $self->{node}->{status}->{error} = {  
         statement => $sql_main,  
         state => $sqlHandle->state,  
         err => $sqlHandle->err,  
         errstr => $sqlHandle->errstr,  
       };  
 =cut  
       # rollback....  
       #print "rollback", "\n";  
       $self->{meta}->{$descent}->{storage}->erase($object);  
       #print "after rollback", "\n";  
     } else {  
       $self->{node}->{status}->{ok} = 1;  
     }  
   
   
   }  
   
 }  
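
For the DBI branch, the statements produced by hash2Sql presumably come out roughly like the commented-out samples above; this is an assumption about its output shape, not taken from its source. For a map of ( key => 'AUD', text => 'Australian Dollar', cs => '...' ) against node 'currencies_csv':

    # hash2Sql($node, $map, 'SQL_INSERT')
    #   INSERT INTO currencies_csv (key, text, cs) VALUES ('AUD', 'Australian Dollar', '...')
    #
    # hash2Sql($node, $map, 'SQL_UPDATE', "oid='abcdef'")
    #   UPDATE currencies_csv SET key='AUD', text='Australian Dollar', cs='...' WHERE oid='abcdef'

The mapped values have already been passed through quotesql() in _buildMap when the target is a DBI storage, so they arrive here ready to be embedded.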
   
 # TODO:  
 # this should be split up into...  
  #   - a "_statNode" (should just touch the node to check for existence)  
 #   - a "_loadNode" (should load node completely)  
 #   - maybe additionally a "loadNodeProperty" (may specify properties to load)  
 #   - introduce $self->{nodecache} for this purpose  
 # TODO:  
 #   should we:  
 #     - not pass ident in here but resolve it via "$descent"?  
 #     - refactor this and stuff it with additional debug/error message  
 #       - this = the way the implicit load mechanism works  
 sub _statloadNode {  
   
   my $self = shift;  
   my $descent = shift;  
   my $ident = shift;  
   my $force = shift;  
   
   # fetch entry to retrieve checksum from  
   # was:  
   if (!$self->{node}->{$descent} || $force) {  
   # is:  
   #if (!$self->{node}->{$descent}->{item} || $force) {  
       
     if (!$ident) {  
       #print "\n", "Attempt to fetch entry implicitely by ident failed: no ident given! This may result in an insert if no write-protection is in the way.", "\n";  
       return;  
     }  
       
     # patch for DBD::CSV  
     if ($ident && $ident eq 'Null') {  
       return;  
     }  
   
 #print "yai!", "\n";  
   
     my $query = {  
       node => $self->{meta}->{$descent}->{node},  
       subnodes => [qw( cs )],  
       criterias => [  
         { key => $self->{meta}->{$descent}->{IdentProvider}->{arg},  
            op => 'eq',  
            val => $ident },  
       ]  
     };  
   
 #print Dumper($query);  
   
     my $result = $self->{meta}->{$descent}->{storage}->sendQuery($query);  
   
     my $entry = $result->getNextEntry();  
   
 #print Dumper($entry);  
 #print "pers: " . $self->{meta}->{$descent}->{storage}->is_persistent($entry), "\n";  
 #my $state = $self->{meta}->{$descent}->{storage}->_fetch_object_state($entry, { name => 'TransactionHop' } );  
 #print Dumper($state);  
   
     my $status = $result->getStatus();  
   
 #print Dumper($status);  
494            
495      # TODO: enhance error handling (store inside tc)    # trace
496      #if (!$row) {      #print Dumper($self->{meta});
497      #  print "\n", "row error", "\n";      #print Dumper($self->{node});
498      #  next;      #exit;
     #}  
   
     # these checks run before actually loading payload- and meta-data to node-container  
499            
       # 1st level - hard error  
       if ($status && $status->{err}) {  
         $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - hard error (that's ok): $status->{err}" );  
         return;  
       }  
     
       # 2nd level - logical (empty/notfound) error  
       if (($status && $status->{empty}) || !$entry) {  
         $logger->debug( __PACKAGE__ . "->_statloadNode (ident=\"$ident\") failed - logical error (that's ok)" );  
         #print "no entry (logical)", "\n";  
         return;  
       }  
   
 #print Dumper($entry);  
   
     # was:  
     # $self->{node}->{$descent}->{ident} = $ident;    
     # is:  
     # TODO: re-resolve ident from entry via metadata "IdentProvider" here - like elsewhere  
     $self->{node}->{$descent}->{ident} = $ident;  
     $self->{node}->{$descent}->{payload} = $entry;  
   
   }  
     
   return 1;  
     
 }  
   
 sub _doTransferToTarget {  
   my $self = shift;  
   my $action = shift;  
500    $self->_modifyNode('target', $action, $self->{node}->{map});    $self->_modifyNode('target', $action, $self->{node}->{map});
501  }  }
502    
503    
504  sub _doModifySource_IdentChecksum {  sub _doModifySource_IdentChecksum {
505    my $self = shift;    my $self = shift;
506    my $ident_new = shift;    my $ident_new = shift;
# Line 1093  sub _doModifySource_IdentChecksum { Line 520  sub _doModifySource_IdentChecksum {
520    $self->_modifyNode('source', 'update', $map);    $self->_modifyNode('source', 'update', $map);
521  }  }
522    
523    sub _doModify_LocalChecksum {
 # this is a shortcut method  
 # ... let's try to avoid _any_ redundant code in here (ok... - at the cost of method lookups...)  
 sub _getNodeList {  
524    my $self = shift;    my $self = shift;
525    my $descent = shift;    my $descent = shift;
526    my $filter = shift;    my $map = {
527    return $self->{meta}->{$descent}->{storage}->getListFiltered($self->{meta}->{$descent}->{node}, $filter);      cs_local => $self->{node}->{$descent}->{checksum_local_calculated},
528      };
529      $self->_modifyNode($descent, 'update', $map);
530  }  }
531    
   
532  sub _prepareNode_MetaProperties {  sub _prepareNode_MetaProperties {
533    my $self = shift;    my $self = shift;
534    my $descent = shift;    my $descent = shift;
# Line 1138  sub _prepareNode_MetaProperties { Line 563  sub _prepareNode_MetaProperties {
563    
564  }  }
565    
566    # TODO: load column-metadata from reversed mapping-metadata
567  sub _prepareNode_DummyIdent {  sub _prepareNode_DummyIdent {
568    my $self = shift;    my $self = shift;
569    my $descent = shift;    my $descent = shift;
570    
571      #print Dumper($self->{options});
572    $logger->info( __PACKAGE__ . "->_prepareNode_DummyIdent( descent $descent )" );    $logger->info( __PACKAGE__ . "->_prepareNode_DummyIdent( descent $descent )" );
573    
574    my $list = $self->_getNodeList($descent);    my $list = $self->_getNodeList($descent);
# Line 1155  sub _prepareNode_DummyIdent { Line 582  sub _prepareNode_DummyIdent {
582      my $map = {      my $map = {
583        $self->{meta}->{$descent}->{IdentProvider}->{arg} => $ident_dummy,        $self->{meta}->{$descent}->{IdentProvider}->{arg} => $ident_dummy,
584        cs => undef,        cs => undef,
585          cs_local => undef,
586      };      };
587            
588      # diff lists and ...      # diff lists and ...
# Line 1196  sub _otherSide { Line 624  sub _otherSide {
624    return '';    return '';
625  }  }
626    
 sub _erase_all {  
   my $self = shift;  
   my $descent = shift;  
   #my $node = shift;  
   my $node = $self->{meta}->{$descent}->{node};  
   $self->{meta}->{$descent}->{storage}->eraseAll($node);  
 }  
   
   
 =pod  
   
   
 =head1 DESCRIPTION  
   
 Data::Transfer::Sync is a module providing a generic synchronization process  
  across arbitrary/multiple storages based on an ident/checksum mechanism.  
 It sits on top of Data::Storage.  
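
A rough sketch of the argument hash this core module reads (reconstructed from the keys used above; the values are hypothetical, see Data/Transfer/Sync.pod for the authoritative interface):

    {
      source       => 'L:Currency',            # "<storage key>:<node>"
      target       => 'R:currencies.csv',
      source_ident => 'storage_method:id',     # IdentProvider declarations
      target_ident => 'property:oid',
      method       => 'checksum',              # synchronization method
      mapping      => [
        [ 'source:key'  => 'target:key'  ],
        [ 'source:text' => 'target:text' ],
      ],
    }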
   
   
 =head1 REQUIREMENTS  
   
   For full functionality:  
     Data::Storage  
     Data::Transform  
     Data::Compare  
     ... and all their dependencies  
   
   
 =head1 AUTHORS / COPYRIGHT  
   
 The Data::Storage module is Copyright (c) 2002 Andreas Motl.  
 All rights reserved.  
   
 You may distribute it under the terms of either the GNU General Public  
 License or the Artistic License, as specified in the Perl README file.  
   
   
 =head1 SUPPORT / WARRANTY  
   
 Data::Storage is free software. IT COMES WITHOUT WARRANTY OF ANY KIND.  
   
   
   
 =head1 BUGS  
   
  When in "import" mode for a windows file, DBD::AutoCSV may hang.  
   Hint: Maybe the source node contains an ident-, but no checksum-column?  
   
   
 =head1 USER LEVEL ERRORS  
   
 =head4 Mapping  
   
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 )
  critical: BizWorks::Process::Setup->startSync: Can't access mapping for node "Currency" - please check BizWorks::ResourceMapping.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  You have to create a sub for each node used in synchronization inside the named Perl module. The name of this sub _must_ match  
   the name of the node you want to sync. This sub holds mapping metadata to give the engine hints about how  
   to access the otherwise generic nodes.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 DBD::AutoCSV's rulebase  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: BizWorks::Process::Setup->syncResource( source_node Currency mode PULL erase 0 import 0 )  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
     
  Execution ERROR: Error while scanning: Missing first row or scanrule not applied. at C:/home/amo/develop/netfrag.org/nfo/perl/libs/DBD/CSV.pm line 165, <GEN9> line 1.  
    called from C:/home/amo/develop/netfrag.org/nfo/perl/libs/Data/Storage/Handler/DBI.pm at 123.  
     
   DBI-Error: DBD::AutoCSV::st fetchrow_hashref failed: Attempt to fetch row from a Non-SELECT statement  
   notice: Data::Transfer::Sync->syncNodes: No nodes to synchronize.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   DBD::AutoCSV contains a rulebase which is spooled down while attempting to guess the style of the csv file regarding  
  parameters like newline (eol), column-separation-character (sep_char), quoting character (quote_char).  
   If this spool runs out of entries and no style could be resolved, DBD::CSV dies causing this "Execution ERROR" which  
   results in a "DBI-Error" afterwards.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Check structure of source node  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
   critical: Data::Transfer::Sync->syncNodes: Can not synchronize: No ident found in source node, maybe try to "import" this node first.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  If lowlevel detection succeeds, but other required information is missing, this message is issued.  
  "Other information" might be:  
     - column-header-row completely missing  
     - ident column is empty  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Modify structure of source node  
     
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->syncNodes: source=L/Currency <- target=R/currencies.csv  
   info: Data::Transfer::Sync->_prepareNode_MetaProperties( descent source )  
   warning: Data::Transfer::Sync->_prepareNode_MetaProperties: node is lacking meta properties - will try to alter...  
   SQL ERROR: Command 'ALTER' not recognized or not supported!  
     
   SQL ERROR: Command 'ALTER' not recognized or not supported!  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
  The Engine found a node whose structure does not match the required one. It tries to alter this automatically - only when doing "import" -  
   but the DBD driver (in this case DBD::CSV) gets in the way croaking not to be able to do this.  
   This could also appear if your database connection has insufficient rights to modify the database structure.  
   DBD::CSV croaks because it doesn't implement the ALTER command, so please edit your columns manually.  
   Hint: Add columns with the names of your "ident" and "checksum" property specifications.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head4 Load source node by ident  
   
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   info: Data::Transfer::Sync->_prepareNode_DummyIdent( descent source )  
   pcritical: Data::Transfer::Sync->_modifyNode failed: "source" node is empty.  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   The source node could not be loaded. Maybe the ident is missing. Please check manually.  
   Hint: Like above, the ident and/or checksum columns may be missing....  
   - - - - - - - - - - - - - - - - - - - - - - - - - -  
   
   
 =head1 TODO  
   
   - sub _resolveIdentProvider  
   - wrap _doModifySource and _doTransferTarget around a core function which can change virtually any type of node  
   - split this module up into Sync.pm, Sync/Core.pm, Sync/Compare.pm and Sync/Compare/Checksum.pm  
      - introduce _compareNodes as a core method and wrap it around methods in Sync/Compare/Checksum.pm  
   - introduce Sync/Compare/MyComparisonImplementation.pm  
   - some generic deferring method - e.g. "$self->defer(action)" - to be able to accumulate a bunch of actions for later processing  
      - this implies everything done is _really_ split up into generic actions - how else would we defer them???  
      - example uses:  
         - fetch whole checksum list from node  
         - remember source ident retransmits  
     - remember: this is convenient - and maybe / of course faster - but we'll lose "per-node-atomic" operations  
   - feature: mechanism to implicit inject checksum property to nodes (alter table / modify schema)  
   - expand statistics / keep track of:  
     - touched/untouched nodes  
   - full sync  
     - just do a push and a pull for now but use stats for touched nodes in between to speed up things  
   - introduce some new metadata flags for a synchronization partner which is (e.g.) of "source" or "target":  
     - isNewNodePropagator  
     - isWriteProtected  
   
   
 =cut  
627    
628  1;  1;
629    __END__
