4 |
# |
# |
5 |
# See COPYRIGHT section in pod text below for usage and distribution rights. |
# See COPYRIGHT section in pod text below for usage and distribution rights. |
6 |
# |
# |
7 |
################################# |
############################################ |
8 |
# |
# |
9 |
# $Log$ |
# $Log$ |
10 |
|
# Revision 1.9 2002/12/01 22:15:45 joko |
11 |
|
# - sub createDb: moved to handler |
12 |
|
# |
13 |
|
# Revision 1.8 2002/11/29 04:48:23 joko |
14 |
|
# + updated pod |
15 |
|
# |
16 |
# Revision 1.7 2002/11/17 06:07:18 joko |
# Revision 1.7 2002/11/17 06:07:18 joko |
17 |
# + creating the handler is easier than proposed first - for now :-) |
# + creating the handler is easier than proposed first - for now :-) |
18 |
# + sub testAvailability |
# + sub testAvailability |
40 |
# Revision 1.1 2002/10/10 03:43:12 cvsjoko |
# Revision 1.1 2002/10/10 03:43:12 cvsjoko |
41 |
# + new |
# + new |
42 |
# |
# |
43 |
################################# |
############################################ |
44 |
|
|
|
# aim_V1: should encapsulate Tangram, DBI, DBD::CSV and LWP:: to access them in an unordinary way ;) |
|
|
# aim_V2: introduce a generic layered structure, refactor *SUBLAYER*-stuff, make (e.g.) this possible: |
|
|
# - Perl Data::Storage[DBD::CSV] -> Perl LWP:: -> Internet HTTP/FTP/* -> Host Daemon -> csv-file |
|
45 |
|
|
46 |
BEGIN { |
BEGIN { |
47 |
$Data::Storage::VERSION = 0.01; |
$Data::Storage::VERSION = 0.02; |
48 |
} |
} |
49 |
|
|
50 |
|
|
52 |
|
|
53 |
Data::Storage - Interface for accessing various Storage implementations for Perl in an independent way |
Data::Storage - Interface for accessing various Storage implementations for Perl in an independent way |
54 |
|
|
55 |
|
|
56 |
|
=head1 AIMS |
57 |
|
|
58 |
|
- should encapsulate Tangram, DBI, DBD::CSV and LWP:: to access them in an unordinary (more convenient) way ;) |
59 |
|
- introduce a generic layered structure, refactor *SUBLAYER*-stuff, make (e.g.) this possible: |
60 |
|
Perl Data::Storage[DBD::CSV] -> Perl LWP:: -> Internet HTTP/FTP/* -> Host Daemon -> csv-file |
61 |
|
- provide generic synchronization mechanisms across arbitrary/multiple storages based on ident/checksum |
62 |
|
maybe it's possible to have schema-, structural- and semantical modifications synchronized??? |
63 |
|
|
64 |
|
|
65 |
=head1 SYNOPSIS |
=head1 SYNOPSIS |
66 |
|
|
67 |
... the basic way: |
=head2 BASIC ACCESS |
68 |
|
|
69 |
|
=head2 ADVANCED ACCESS |
70 |
|
|
71 |
... via inheritance: |
... via inheritance: |
72 |
|
|
84 |
$self->{storage}->insert($proxyObj); |
$self->{storage}->insert($proxyObj); |
85 |
|
|
86 |
|
|
87 |
|
=head2 SYNCHRONIZATION |
88 |
|
|
89 |
|
my $nodemapping = { |
90 |
|
'LangText' => 'langtexts.csv', |
91 |
|
'Currency' => 'currencies.csv', |
92 |
|
'Country' => 'countries.csv', |
93 |
|
}; |
94 |
|
|
95 |
|
my $propmapping = { |
96 |
|
'LangText' => [ |
97 |
|
[ 'source:lcountrykey' => 'target:country' ], |
98 |
|
[ 'source:lkey' => 'target:key' ], |
99 |
|
[ 'source:lvalue' => 'target:text' ], |
100 |
|
], |
101 |
|
'Currency' => [ |
102 |
|
[ 'source:ckey' => 'target:key' ], |
103 |
|
[ 'source:cname' => 'target:text' ], |
104 |
|
], |
105 |
|
'Country' => [ |
106 |
|
[ 'source:ckey' => 'target:key' ], |
107 |
|
[ 'source:cname' => 'target:text' ], |
108 |
|
], |
109 |
|
}; |
110 |
|
|
111 |
|
sub syncResource { |
112 |
|
|
113 |
|
my $self = shift; |
114 |
|
my $node_source = shift; |
115 |
|
my $mode = shift; |
116 |
|
my $opts = shift; |
117 |
|
|
118 |
|
$mode ||= ''; |
119 |
|
$opts->{erase} ||= 0; |
120 |
|
|
121 |
|
$logger->info( __PACKAGE__ . "->syncResource( node_source $node_source mode $mode erase $opts->{erase} )"); |
122 |
|
|
123 |
|
# resolve metadata for syncing requested resource |
124 |
|
my $node_target = $nodemapping->{$node_source}; |
125 |
|
my $mapping = $propmapping->{$node_source}; |
126 |
|
|
127 |
|
if (!$node_target || !$mapping) { |
128 |
|
# logger: ... "no target, sorry!"
129 |
|
print "error while resolving resource metadata", "\n"; |
130 |
|
return; |
131 |
|
} |
132 |
|
|
133 |
|
if ($opts->{erase}) { |
134 |
|
$self->_erase_all($node_source); |
135 |
|
} |
136 |
|
|
137 |
|
# create new sync object |
138 |
|
my $sync = Data::Transfer::Sync->new( |
139 |
|
storages => { |
140 |
|
L => $self->{bizWorks}->{backend}, |
141 |
|
R => $self->{bizWorks}->{resources}, |
142 |
|
}, |
143 |
|
id_authorities => [qw( L ) ], |
144 |
|
checksum_authorities => [qw( L ) ], |
145 |
|
write_protected => [qw( R ) ], |
146 |
|
verbose => 1, |
147 |
|
); |
148 |
|
|
149 |
|
# sync |
150 |
|
# todo: filter!? |
151 |
|
$sync->syncNodes( { |
152 |
|
direction => $mode, # | +PUSH | +PULL | -FULL | +IMPORT | -EXPORT |
153 |
|
method => 'checksum', # | -timestamp | -manual |
154 |
|
source => "L:$node_source", |
155 |
|
source_ident => 'storage_method:id', |
156 |
|
source_exclude => [qw( id cs )], |
157 |
|
target => "R:$node_target", |
158 |
|
target_ident => 'property:oid', |
159 |
|
mapping => $mapping, |
160 |
|
} ); |
161 |
|
|
162 |
|
} |
163 |
|
|
164 |
|
|
165 |
=head2 NOTE |
=head2 NOTE |
166 |
|
|
167 |
This module heavily relies on DBI and Tangram, but adds a lot of additional bugs and quirks. |
This module heavily relies on DBI and Tangram, but adds a lot of additional bugs and quirks. |
170 |
|
|
171 |
=head1 REQUIREMENTS |
=head1 REQUIREMENTS |
172 |
|
|
173 |
For full functionality: |
For full functionality: |
174 |
DBI from CPAN |
DBI from CPAN |
175 |
Tangram from CPAN |
DBD::mysql from CPAN |
176 |
Class::Tangram from CPAN |
Tangram 2.04 from CPAN (hmmm, 2.04 won't do in some cases) |
177 |
MySQL::Diff from http://adamspiers.org/computing/mysqldiff/ |
Tangram 2.05 from http://... (2.05 seems okay but there are also additional patches from our side) |
178 |
... and all their dependencies |
Class::Tangram from CPAN |
179 |
|
DBD::CSV from CPAN |
180 |
|
MySQL::Diff from http://adamspiers.org/computing/mysqldiff/ |
181 |
|
... and all their dependencies |
182 |
|
|
183 |
=cut |
=cut |
184 |
|
|
221 |
# - Deep recursion on subroutine "Data::Storage::AUTOLOAD" |
# - Deep recursion on subroutine "Data::Storage::AUTOLOAD" |
222 |
# - Deep recursion on subroutine "Data::Storage::Handler::Abstract::AUTOLOAD" |
# - Deep recursion on subroutine "Data::Storage::Handler::Abstract::AUTOLOAD" |
223 |
# - Deep recursion on anonymous subroutine at [...] |
# - Deep recursion on anonymous subroutine at [...] |
224 |
# we also might filter log messages caused by logging itself in "advanced logging of AUTOLOAD calls" |
# we also might filter log messages caused by logging to itself in "advanced logging of AUTOLOAD calls" |
225 |
|
|
226 |
my $self = shift; |
my $self = shift; |
227 |
our $AUTOLOAD; |
our $AUTOLOAD; |
243 |
$logstring .= "\t" x $tabcount . "(AUTOLOAD)"; |
$logstring .= "\t" x $tabcount . "(AUTOLOAD)"; |
244 |
# TODO: only ok if logstring doesn't contain |
# TODO: only ok if logstring doesn't contain |
245 |
# e.g. "Data::Storage[Tangram]->insert(SystemEvent=HASH(0x5c0034c)) (AUTOLOAD)" |
# e.g. "Data::Storage[Tangram]->insert(SystemEvent=HASH(0x5c0034c)) (AUTOLOAD)" |
246 |
# but that would be way too specific as long as we don't have an abstract handler for this ;) |
# but that would be _way_ too specific as long as we don't have an abstract handler for this ;) |
247 |
$logger->debug( $logstring ); |
$logger->debug( $logstring ); |
248 |
#print join('; ', @_); |
#print join('; ', @_); |
249 |
} |
} |
250 |
|
|
251 |
# filtering AUTOLOAD calls |
# filtering AUTOLOAD calls and first-time-touch of the actual storage impl |
252 |
if ($self->_filter_AUTOLOAD($method)) { |
if ($self->_filter_AUTOLOAD($method)) { |
253 |
#print "_accessStorage\n"; |
#print "_accessStorage\n"; |
254 |
$self->_accessStorage(); |
$self->_accessStorage(); |
308 |
#my @args = %{$self->{locator}}; |
#my @args = %{$self->{locator}}; |
309 |
my @args = (); |
my @args = (); |
310 |
|
|
311 |
# create new storage handle object, propagate arguments to handler |
# - create new storage handle object |
312 |
# pass locator by reference to be able to store status information in it |
# - propagate arguments to handler |
313 |
|
# - pass locator by reference to be able to store status- or meta-information in it |
314 |
$self->{STORAGEHANDLE} = $pkg->new( locator => $self->{locator}, @args ); |
$self->{STORAGEHANDLE} = $pkg->new( locator => $self->{locator}, @args ); |
315 |
|
|
316 |
} |
} |
344 |
} |
} |
345 |
|
|
346 |
sub removeLogDispatchHandler { |
sub removeLogDispatchHandler { |
347 |
|
my $self = shift; |
348 |
my $self = shift; |
my $name = shift; |
349 |
my $name = shift; |
#my $logger = shift; |
350 |
#my $logger = shift; |
$logger->remove($name); |
|
|
|
|
$logger->remove($name); |
|
|
|
|
351 |
} |
} |
352 |
|
|
353 |
sub getDbName { |
sub getDbName { |
365 |
if ( my $dbh = DBI->connect($dsn, '', '', { |
if ( my $dbh = DBI->connect($dsn, '', '', { |
366 |
PrintError => 0, |
PrintError => 0, |
367 |
} ) ) { |
} ) ) { |
368 |
|
|
369 |
|
# TODO: REVIEW |
370 |
$dbh->disconnect(); |
$dbh->disconnect(); |
371 |
|
|
372 |
return 1; |
return 1; |
373 |
} else { |
} else { |
374 |
$logger->warning( __PACKAGE__ . "[$self->{locator}->{type}]" . "->testDsn(): " . "DBI-error: " . $DBI::errstr ); |
$logger->warning( __PACKAGE__ . "[$self->{locator}->{type}]" . "->testDsn(): " . "DBI-error: " . $DBI::errstr ); |
382 |
return $status; |
return $status; |
383 |
} |
} |
384 |
|
|
|
sub createDb {

    # Create the database named in the locator's DBI dsn.
    #
    # The database-name component is stripped out of the dsn first,
    # because we must connect to the server *without* selecting the
    # (not-yet-existing) database. Issues "CREATE DATABASE <name>",
    # then disconnects.
    #
    # Returns: 1 on success, undef on failure (no dsn match, no
    # connection, or failing CREATE DATABASE statement).

    my $self = shift;
    my $dsn  = $self->{locator}->{dbi}->{dsn};

    $logger->debug( __PACKAGE__ . "->createDb( dsn $dsn )" );

    # Strip the database name from the dsn and remember it.
    # Guard on the substitution's success instead of reading a bare $1:
    # a stale $1 from an earlier match can otherwise leak in when the
    # dsn contains no "database=" entry. Also accept a dsn whose
    # database entry is not terminated by a semicolon (e.g. at the
    # end of the dsn string).
    my $database_name;
    if ($dsn =~ s/database=([^;]+);?//) {
        $database_name = $1;
    }

    my $ok;

    # PrintError disabled: a failed connect is an expected condition
    # here and is reported via the return value, not on STDERR.
    if ( my $dbh = DBI->connect($dsn, '', '', {
        PrintError => 0,
    } ) ) {

        if ($database_name) {
            if ($dbh->do("CREATE DATABASE $database_name;")) {
                $ok = 1;
            }
        }

        $dbh->disconnect();

    }

    return $ok;

}
|
385 |
|
|
386 |
sub dropDb { |
sub dropDb { |
387 |
my $self = shift; |
my $self = shift; |
402 |
$ok = 1; |
$ok = 1; |
403 |
} |
} |
404 |
} |
} |
405 |
|
|
406 |
$dbh->disconnect(); |
$dbh->disconnect(); |
407 |
|
|
408 |
} |
} |
409 |
|
|
410 |
return $ok; |
return $ok; |
421 |
|
|
422 |
=head1 DESCRIPTION |
=head1 DESCRIPTION |
423 |
|
|
424 |
Data::Storage is a module for accessing various "data structures" stored inside
Data::Storage is a module for accessing various "data structures" stored inside |
425 |
various "data containers". It sits on top of DBI and/or Tangram. |
various "data containers". It sits on top of DBI and/or Tangram. |
426 |
|
|
427 |
|
|
437 |
=head1 ACKNOWLEDGEMENTS |
=head1 ACKNOWLEDGEMENTS |
438 |
|
|
439 |
Larry Wall for Perl, Tim Bunce for DBI, Jean-Louis Leroy for Tangram and Set::Object, |
Larry Wall for Perl, Tim Bunce for DBI, Jean-Louis Leroy for Tangram and Set::Object, |
440 |
Sam Vilain for Class::Tangram, Adam Spiers for MySQL::Diff and all contributors. |
Sam Vilain for Class::Tangram, Jochen Wiedmann and Jeff Zucker for DBD::CSV and related, |
441 |
|
Adam Spiers for MySQL::Diff and all contributors. |
442 |
|
|
443 |
|
|
444 |
=head1 SUPPORT / WARRANTY |
=head1 SUPPORT / WARRANTY |
449 |
=head1 TODO |
=head1 TODO |
450 |
|
|
451 |
|
|
452 |
=head2 Handle the following errors/cases: |
=head2 BUGS |
453 |
|
|
454 |
|
"DBI-Error [Tangram]: DBD::mysql::st execute failed: Unknown column 't1.requestdump' in 'field list'" |
455 |
|
|
456 |
=head3 "DBI-Error [Tangram]: DBD::mysql::st execute failed: Unknown column 't1.requestdump' in 'field list'" |
... occurs when operating on object-attributes not introduced yet:
457 |
|
this should be detected and appended/replaced through: |
458 |
|
"Schema-Error detected, maybe (just) an inconsistency. |
459 |
|
Please check if your declaration in schema-module "a" matches structure in database "b" or try to run" |
460 |
|
db_setup.pl --dbkey=import --action=deploy |
461 |
|
|
|
... occurs when operating on object-attributes not introduced yet:
|
|
this should be detected and appended/replaced through: |
|
|
"Schema-Error detected, maybe (just) an inconsistency. |
|
|
Please check if your declaration in schema-module "a" matches structure in database "b" or try to run" |
|
|
db_setup.pl --dbkey=import --action=deploy |
|
462 |
|
|
463 |
=head3 Compare schema (structure diff) with database ... |
Compare schema (structure diff) with database ... |
464 |
|
|
465 |
... when issuing "db_setup.pl --dbkey=import --action=deploy" |
... when issuing "db_setup.pl --dbkey=import --action=deploy" |
466 |
on a database with an already deployed schema, use an additional "--update" then |
on a database with an already deployed schema, use an additional "--update" then |
489 |
automatically and this is believed to be the most common case under normal circumstances. |
automatically and this is believed to be the most common case under normal circumstances. |
490 |
|
|
491 |
|
|
492 |
=head2 Introduce some features: |
=head2 FEATURES |
493 |
|
|
494 |
- Get this stuff together with UML (Unified Modeling Language) and/or standards from ODMG. |
- Get this stuff together with UML (Unified Modeling Language) and/or standards from ODMG. |
495 |
- Make it possible to load/save schemas in XMI (XML Metadata Interchange), |
- Make it possible to load/save schemas in XMI (XML Metadata Interchange), |
497 |
Integrate/bundle this with a web-/html-based UML modeling tool or |
Integrate/bundle this with a web-/html-based UML modeling tool or |
498 |
some other interesting stuff like the "Co-operative UML Editor" from Uni Darmstadt. (web-/java-based) |
some other interesting stuff like the "Co-operative UML Editor" from Uni Darmstadt. (web-/java-based) |
499 |
- Enable Round Trip Engineering. Keep code and diagrams in sync. Don't annoy/bother the programmers. |
- Enable Round Trip Engineering. Keep code and diagrams in sync. Don't annoy/bother the programmers. |
500 |
- Add some more handlers: |
- Add support for some more handlers/locators to be able to |
501 |
- look at DBD::CSV, Text::CSV, XML::CSV, XML::Excel |
access the following standards/protocols/interfaces/programs/apis transparently: |
502 |
- Add some more locations/locators: |
+ DBD::CSV (via Data::Storage::Handler::DBI) |
503 |
- PerlDAV: http://www.webdav.org/perldav/ |
(-) Text::CSV, XML::CSV, XML::Excel |
504 |
- Move to t3, use InCASE |
- MAPI |
505 |
|
- LDAP |
506 |
|
- DAV (look at PerlDAV: http://www.webdav.org/perldav/) |
507 |
|
- Mbox (use formail for separating/splitting entries/nodes)
508 |
|
- Cyrus (cyrdeliver - what about cyrretrieve (export)???) |
509 |
|
- use File::DiffTree, use File::Compare |
510 |
|
- Hibernate |
511 |
|
- "Win32::UserAccountDb" |
512 |
|
- "*nix::UserAccountDb" |
513 |
|
- .wab - files (Windows Address Book) |
514 |
|
- .pst - files (Outlook Post Storage?) |
515 |
|
- XML (e.g. via XML::Simple?) |
516 |
|
- Move to t3, look at InCASE |
517 |
|
|
518 |
|
|
519 |
=head3 Links: |
=head3 LINKS / REFERENCES |
520 |
|
|
521 |
Specs: |
Specs: |
522 |
UML 1.3 Spec: http://cgi.omg.org/cgi-bin/doc?ad/99-06-08.pdf |
UML 1.3 Spec: http://cgi.omg.org/cgi-bin/doc?ad/99-06-08.pdf |