4 |
# |
# |
5 |
# See COPYRIGHT section in pod text below for usage and distribution rights. |
# See COPYRIGHT section in pod text below for usage and distribution rights. |
6 |
# |
# |
7 |
################################# |
############################################ |
8 |
# |
# |
9 |
# $Log$ |
# $Log$ |
10 |
|
# Revision 1.13 2002/12/17 21:54:12 joko |
11 |
|
# + feature when using Tangram: |
12 |
|
# + what? each object created should delivered with a globally(!?) unique identifier (GUID) besides the native tangram object id (OID) |
13 |
|
# + patched Tangram::Storage (jonen) |
14 |
|
# + enhanced Data::Storage::Schema::Tangram (joko) |
15 |
|
# + enhanced Data::Storage::Handler::Tangram 'sub getObjectByGuid' (jonen) |
16 |
|
# + how? |
17 |
|
# + each concrete (non-abstract) class gets injected with an additional field/property called 'guid' - this is done (dynamically) on schema level |
18 |
|
# + this property ('guid') gets filled on object creation/insertion from 'sub Tangram::Storage::_insert' using Data::UUID from CPAN |
19 |
|
# + (as for now) this property can get accessed by calling 'getObjectByGuid' on the already known storage-handle used throughout the application |
20 |
|
# |
21 |
|
# Revision 1.12 2002/12/12 02:50:15 joko |
22 |
|
# + this now (unfortunately) needs DBI for some helper functions |
23 |
|
# + TODO: these have to be refactored to another scope! (soon!) |
24 |
|
# |
25 |
|
# Revision 1.11 2002/12/11 06:53:19 joko |
26 |
|
# + updated pod |
27 |
|
# |
28 |
|
# Revision 1.10 2002/12/07 03:37:23 joko |
29 |
|
# + updated pod |
30 |
|
# |
31 |
|
# Revision 1.9 2002/12/01 22:15:45 joko |
32 |
|
# - sub createDb: moved to handler |
33 |
|
# |
34 |
|
# Revision 1.8 2002/11/29 04:48:23 joko |
35 |
|
# + updated pod |
36 |
|
# |
37 |
# Revision 1.7 2002/11/17 06:07:18 joko |
# Revision 1.7 2002/11/17 06:07:18 joko |
38 |
# + creating the handler is easier than proposed first - for now :-) |
# + creating the handler is easier than proposed first - for now :-) |
39 |
# + sub testAvailability |
# + sub testAvailability |
61 |
# Revision 1.1 2002/10/10 03:43:12 cvsjoko |
# Revision 1.1 2002/10/10 03:43:12 cvsjoko |
62 |
# + new |
# + new |
63 |
# |
# |
64 |
################################# |
############################################ |
65 |
|
|
|
# aim_V1: should encapsulate Tangram, DBI, DBD::CSV and LWP:: to access them in an unordinary way ;) |
|
|
# aim_V2: introduce a generic layered structure, refactor *SUBLAYER*-stuff, make (e.g.) this possible: |
|
|
# - Perl Data::Storage[DBD::CSV] -> Perl LWP:: -> Internet HTTP/FTP/* -> Host Daemon -> csv-file |
|
66 |
|
|
67 |
BEGIN { |
BEGIN { |
68 |
$Data::Storage::VERSION = 0.01; |
$Data::Storage::VERSION = 0.02; |
69 |
} |
} |
70 |
|
|
71 |
|
|
72 |
=head1 NAME |
=head1 NAME |
73 |
|
|
74 |
Data::Storage - Interface for accessing various Storage implementations for Perl in an independent way |
Data::Storage - Interface for accessing various Storage implementations for Perl in an independent way |
75 |
|
|
76 |
|
|
77 |
|
=head1 AIMS |
78 |
|
|
79 |
|
- should encapsulate Tangram, DBI, DBD::CSV and LWP:: to access them in an unordinary (more convenient) way ;) |
80 |
|
- introduce a generic layered structure, refactor *SUBLAYER*-stuff, make (e.g.) this possible: |
81 |
|
Perl Data::Storage[DBD::CSV] -> Perl LWP:: -> Internet HTTP/FTP/* -> Host Daemon -> csv-file |
82 |
|
- provide generic synchronization mechanisms across arbitrary/multiple storages based on ident/checksum |
83 |
|
maybe it's possible to have schema-, structural- and semantic modifications synchronized??? |
84 |
|
|
85 |
|
|
86 |
=head1 SYNOPSIS |
=head1 SYNOPSIS |
87 |
|
|
88 |
... the basic way: |
=head2 BASIC ACCESS |
89 |
|
|
90 |
|
=head2 ADVANCED ACCESS |
91 |
|
|
92 |
... via inheritance: |
... via inheritance: |
93 |
|
|
105 |
$self->{storage}->insert($proxyObj); |
$self->{storage}->insert($proxyObj); |
106 |
|
|
107 |
|
|
108 |
|
=head2 SYNCHRONIZATION |
109 |
|
|
110 |
|
my $nodemapping = { |
111 |
|
'LangText' => 'langtexts.csv', |
112 |
|
'Currency' => 'currencies.csv', |
113 |
|
'Country' => 'countries.csv', |
114 |
|
}; |
115 |
|
|
116 |
|
my $propmapping = { |
117 |
|
'LangText' => [ |
118 |
|
[ 'source:lcountrykey' => 'target:country' ], |
119 |
|
[ 'source:lkey' => 'target:key' ], |
120 |
|
[ 'source:lvalue' => 'target:text' ], |
121 |
|
], |
122 |
|
'Currency' => [ |
123 |
|
[ 'source:ckey' => 'target:key' ], |
124 |
|
[ 'source:cname' => 'target:text' ], |
125 |
|
], |
126 |
|
'Country' => [ |
127 |
|
[ 'source:ckey' => 'target:key' ], |
128 |
|
[ 'source:cname' => 'target:text' ], |
129 |
|
], |
130 |
|
}; |
131 |
|
|
132 |
|
sub syncResource { |
133 |
|
|
134 |
|
my $self = shift; |
135 |
|
my $node_source = shift; |
136 |
|
my $mode = shift; |
137 |
|
my $opts = shift; |
138 |
|
|
139 |
|
$mode ||= ''; |
140 |
|
$opts->{erase} ||= 0; |
141 |
|
|
142 |
|
$logger->info( __PACKAGE__ . "->syncResource( node_source $node_source mode $mode erase $opts->{erase} )"); |
143 |
|
|
144 |
|
# resolve metadata for syncing requested resource |
145 |
|
my $node_target = $nodemapping->{$node_source}; |
146 |
|
my $mapping = $propmapping->{$node_source}; |
147 |
|
|
148 |
|
if (!$node_target || !$mapping) { |
149 |
|
# logger.... "no target, sorry!" |
150 |
|
print "error while resolving resource metadata", "\n"; |
151 |
|
return; |
152 |
|
} |
153 |
|
|
154 |
|
if ($opts->{erase}) { |
155 |
|
$self->_erase_all($node_source); |
156 |
|
} |
157 |
|
|
158 |
|
# create new sync object |
159 |
|
my $sync = Data::Transfer::Sync->new( |
160 |
|
storages => { |
161 |
|
L => $self->{bizWorks}->{backend}, |
162 |
|
R => $self->{bizWorks}->{resources}, |
163 |
|
}, |
164 |
|
id_authorities => [qw( L ) ], |
165 |
|
checksum_authorities => [qw( L ) ], |
166 |
|
write_protected => [qw( R ) ], |
167 |
|
verbose => 1, |
168 |
|
); |
169 |
|
|
170 |
|
# sync |
171 |
|
# todo: filter!? |
172 |
|
$sync->syncNodes( { |
173 |
|
direction => $mode, # | +PUSH | +PULL | -FULL | +IMPORT | -EXPORT |
174 |
|
method => 'checksum', # | -timestamp | -manual |
175 |
|
source => "L:$node_source", |
176 |
|
source_ident => 'storage_method:id', |
177 |
|
source_exclude => [qw( id cs )], |
178 |
|
target => "R:$node_target", |
179 |
|
target_ident => 'property:oid', |
180 |
|
mapping => $mapping, |
181 |
|
} ); |
182 |
|
|
183 |
|
} |
184 |
|
|
185 |
|
|
186 |
=head2 NOTE |
=head2 NOTE |
187 |
|
|
188 |
This module heavily relies on DBI and Tangram, but adds a lot of additional bugs and quirks. |
This module heavily relies on DBI and Tangram, but adds a lot of additional bugs and quirks. |
189 |
Please look at their documentation and/or this code for additional information. |
Please look at their documentation and/or this code for additional information. |
190 |
|
|
191 |
|
|
192 |
=head1 REQUIREMENTS |
=head1 REQUIREMENTS |
193 |
|
|
194 |
For full functionality: |
For full functionality: |
195 |
DBI from CPAN |
DBI from CPAN |
196 |
Tangram from CPAN |
DBD::mysql from CPAN |
197 |
Class::Tangram from CPAN |
Tangram 2.04 from CPAN (hmmm, 2.04 won't do in some cases) |
198 |
MySQL::Diff from http://adamspiers.org/computing/mysqldiff/ |
Tangram 2.05 from http://... (2.05 seems okay but there are also additional patches from our side) |
199 |
... and all their dependencies |
Class::Tangram from CPAN |
200 |
|
DBD::CSV from CPAN |
201 |
|
MySQL::Diff from http://adamspiers.org/computing/mysqldiff/ |
202 |
|
... and all their dependencies |
203 |
|
|
204 |
=cut |
=cut |
205 |
|
|
214 |
use Data::Storage::Locator; |
use Data::Storage::Locator; |
215 |
use Data::Dumper; |
use Data::Dumper; |
216 |
|
|
217 |
|
# TODO: wipe out! |
218 |
|
use DBI; |
219 |
|
|
220 |
# TODO: actually implement level (integrate with Log::Dispatch) |
# TODO: actually implement level (integrate with Log::Dispatch) |
221 |
my $TRACELEVEL = 0; |
my $TRACELEVEL = 0; |
222 |
|
|
245 |
# - Deep recursion on subroutine "Data::Storage::AUTOLOAD" |
# - Deep recursion on subroutine "Data::Storage::AUTOLOAD" |
246 |
# - Deep recursion on subroutine "Data::Storage::Handler::Abstract::AUTOLOAD" |
# - Deep recursion on subroutine "Data::Storage::Handler::Abstract::AUTOLOAD" |
247 |
# - Deep recursion on anonymous subroutine at [...] |
# - Deep recursion on anonymous subroutine at [...] |
248 |
# we also might filter log messages caused by logging itself in "advanced logging of AUTOLOAD calls" |
# we also might filter log messages caused by logging to itself in "advanced logging of AUTOLOAD calls" |
249 |
|
|
250 |
my $self = shift; |
my $self = shift; |
251 |
our $AUTOLOAD; |
our $AUTOLOAD; |
267 |
$logstring .= "\t" x $tabcount . "(AUTOLOAD)"; |
$logstring .= "\t" x $tabcount . "(AUTOLOAD)"; |
268 |
# TODO: only ok if logstring doesn't contain |
# TODO: only ok if logstring doesn't contain |
269 |
# e.g. "Data::Storage[Tangram]->insert(SystemEvent=HASH(0x5c0034c)) (AUTOLOAD)" |
# e.g. "Data::Storage[Tangram]->insert(SystemEvent=HASH(0x5c0034c)) (AUTOLOAD)" |
270 |
# but that would be way too specific as long as we don't have an abstract handler for this ;) |
# but that would be _way_ too specific as long as we don't have an abstract handler for this ;) |
271 |
$logger->debug( $logstring ); |
$logger->debug( $logstring ); |
272 |
#print join('; ', @_); |
#print join('; ', @_); |
273 |
} |
} |
274 |
|
|
275 |
# filtering AUTOLOAD calls |
# filtering AUTOLOAD calls and first-time-touch of the actual storage impl |
276 |
if ($self->_filter_AUTOLOAD($method)) { |
if ($self->_filter_AUTOLOAD($method)) { |
277 |
#print "_accessStorage\n"; |
#print "_accessStorage\n"; |
278 |
$self->_accessStorage(); |
$self->_accessStorage(); |
332 |
#my @args = %{$self->{locator}}; |
#my @args = %{$self->{locator}}; |
333 |
my @args = (); |
my @args = (); |
334 |
|
|
335 |
# create new storage handle object, propagate arguments to handler |
# - create new storage handle object |
336 |
# pass locator by reference to be able to store status information in it |
# - propagate arguments to handler |
337 |
|
# - pass locator by reference to be able to store status- or meta-information in it |
338 |
$self->{STORAGEHANDLE} = $pkg->new( locator => $self->{locator}, @args ); |
$self->{STORAGEHANDLE} = $pkg->new( locator => $self->{locator}, @args ); |
339 |
|
|
340 |
} |
} |
368 |
} |
} |
369 |
|
|
370 |
sub removeLogDispatchHandler { |
sub removeLogDispatchHandler { |
371 |
|
my $self = shift; |
372 |
my $self = shift; |
my $name = shift; |
373 |
my $name = shift; |
#my $logger = shift; |
374 |
#my $logger = shift; |
$logger->remove($name); |
|
|
|
|
$logger->remove($name); |
|
|
|
|
375 |
} |
} |
376 |
|
|
377 |
sub getDbName { |
sub getDbName { |
382 |
return $database_name; |
return $database_name; |
383 |
} |
} |
384 |
|
|
|
sub testDsn { |
|
|
my $self = shift; |
|
|
my $dsn = $self->{locator}->{dbi}->{dsn}; |
|
|
my $result; |
|
|
if ( my $dbh = DBI->connect($dsn, '', '', { |
|
|
PrintError => 0, |
|
|
} ) ) { |
|
|
$dbh->disconnect(); |
|
|
return 1; |
|
|
} else { |
|
|
$logger->warning( __PACKAGE__ . "[$self->{locator}->{type}]" . "->testDsn(): " . "DBI-error: " . $DBI::errstr ); |
|
|
} |
|
|
} |
|
|
|
|
385 |
sub testAvailability { |
sub testAvailability { |
386 |
my $self = shift; |
my $self = shift; |
387 |
my $status = $self->testDsn(); |
my $status = $self->testDsn(); |
389 |
return $status; |
return $status; |
390 |
} |
} |
391 |
|
|
392 |
sub createDb { |
sub isConnected { |
393 |
my $self = shift; |
my $self = shift; |
394 |
my $dsn = $self->{locator}->{dbi}->{dsn}; |
# TODO: REVIEW! |
395 |
|
return 1 if $self->{STORAGEHANDLE}; |
396 |
$logger->debug( __PACKAGE__ . "->createDb( dsn $dsn )" ); |
} |
|
|
|
|
$dsn =~ s/database=(.+?);//; |
|
|
my $database_name = $1; |
|
397 |
|
|
398 |
my $ok; |
sub testDsn { |
399 |
|
my $self = shift; |
400 |
|
my $dsn = $self->{locator}->{dbi}->{dsn}; |
401 |
|
my $result; |
402 |
if ( my $dbh = DBI->connect($dsn, '', '', { |
if ( my $dbh = DBI->connect($dsn, '', '', { |
403 |
PrintError => 0, |
PrintError => 0, |
404 |
} ) ) { |
} ) ) { |
405 |
if ($database_name) { |
|
406 |
if ($dbh->do("CREATE DATABASE $database_name;")) { |
# TODO: REVIEW |
|
$ok = 1; |
|
|
} |
|
|
} |
|
407 |
$dbh->disconnect(); |
$dbh->disconnect(); |
408 |
|
|
409 |
|
return 1; |
410 |
|
} else { |
411 |
|
$logger->warning( __PACKAGE__ . "[$self->{locator}->{type}]" . "->testDsn(): " . "DBI-error: " . $DBI::errstr ); |
412 |
} |
} |
|
|
|
|
return $ok; |
|
|
|
|
413 |
} |
} |
414 |
|
|
415 |
sub dropDb { |
sub dropDb { |
431 |
$ok = 1; |
$ok = 1; |
432 |
} |
} |
433 |
} |
} |
434 |
|
|
435 |
$dbh->disconnect(); |
$dbh->disconnect(); |
436 |
|
|
437 |
} |
} |
438 |
|
|
439 |
return $ok; |
return $ok; |
440 |
} |
} |
441 |
|
|
|
sub isConnected { |
|
|
my $self = shift; |
|
|
return 1 if $self->{STORAGEHANDLE}; |
|
|
} |
|
|
|
|
442 |
1; |
1; |
443 |
__END__ |
__END__ |
444 |
|
|
445 |
|
|
446 |
=head1 DESCRIPTION |
=head1 DESCRIPTION |
447 |
|
|
448 |
Data::Storage is a module for accessing various "data structures" stored inside |
=head2 Data::Storage |
|
various "data containers". It sits on top of DBI and/or Tangram. |
|
449 |
|
|
450 |
|
Data::Storage is a module for accessing various "data structures / kinds of structured data" stored inside |
451 |
|
various "data containers". |
452 |
|
We tried to use the AdapterPattern (http://c2.com/cgi/wiki?AdapterPattern) to implement a wrapper-layer |
453 |
|
around core CPAN modules (Tangram, DBI). |
454 |
|
|
455 |
|
=head2 Why? |
456 |
|
|
457 |
|
You will get a better code-structure (not bad for later maintenance) in growing Perl code projects, |
458 |
|
especially when using multiple database connections at the same time. |
459 |
|
You will be able to switch between different _kinds_ of implementations used for storing data. |
460 |
|
Your code will use the very same API to access these storage layers. |
461 |
|
... implementation has to be changed for now |
462 |
|
Maybe you will be able to switch "on-the-fly" without changing any bits in code in the future.... |
463 |
|
... but that's not the focus |
464 |
|
|
465 |
=head1 AUTHORS / COPYRIGHT |
=head2 What else? |
466 |
|
|
467 |
|
Having this, we were able to implement a generic data synchronization module more easily, |
468 |
|
please look at Data::Transfer. |
469 |
|
|
|
The Data::Storage module is Copyright (c) 2002 Andreas Motl. |
|
|
All rights reserved. |
|
470 |
|
|
471 |
You may distribute it under the terms of either the GNU General Public |
=head1 AUTHORS / COPYRIGHT |
472 |
License or the Artistic License, as specified in the Perl README file. |
|
473 |
|
The Data::Storage module is Copyright (c) 2002 Andreas Motl. |
474 |
|
All rights reserved. |
475 |
|
You may distribute it under the terms of either the GNU General Public |
476 |
|
License or the Artistic License, as specified in the Perl README file. |
477 |
|
|
478 |
|
|
479 |
=head1 ACKNOWLEDGEMENTS |
=head1 ACKNOWLEDGEMENTS |
480 |
|
|
481 |
Larry Wall for Perl, Tim Bunce for DBI, Jean-Louis Leroy for Tangram and Set::Object, |
Larry Wall for Perl, Tim Bunce for DBI, Jean-Louis Leroy for Tangram and Set::Object, |
482 |
Sam Vilain for Class::Tangram, Adam Spiers for MySQL::Diff and all contributors. |
Sam Vilain for Class::Tangram, Jochen Wiedmann and Jeff Zucker for DBD::CSV & Co., |
483 |
|
Adam Spiers for MySQL::Diff and all contributors. |
484 |
|
|
485 |
|
|
486 |
=head1 SUPPORT / WARRANTY |
=head1 SUPPORT / WARRANTY |
487 |
|
|
488 |
Data::Storage is free software. IT COMES WITHOUT WARRANTY OF ANY KIND. |
Data::Storage is free software. IT COMES WITHOUT WARRANTY OF ANY KIND. |
489 |
|
|
490 |
|
|
491 |
=head1 TODO |
=head1 TODO |
492 |
|
|
493 |
|
|
494 |
=head2 Handle the following errors/cases: |
=head2 BUGS |
495 |
|
|
496 |
|
"DBI-Error [Tangram]: DBD::mysql::st execute failed: Unknown column 't1.requestdump' in 'field list'" |
497 |
|
|
498 |
=head3 "DBI-Error [Tangram]: DBD::mysql::st execute failed: Unknown column 't1.requestdump' in 'field list'" |
... occurs when operating on object-attributes not introduced yet: |
499 |
|
this should be detected and appended/replaced through: |
500 |
|
"Schema-Error detected, maybe (just) an inconsistency. |
501 |
|
Please check if your declaration in schema-module "a" matches structure in database "b" or try to run" |
502 |
|
db_setup.pl --dbkey=import --action=deploy |
503 |
|
|
|
... occurs when operating on object-attributes not introduced yet: |
|
|
this should be detected and appended/replaced through: |
|
|
"Schema-Error detected, maybe (just) an inconsistency. |
|
|
Please check if your declaration in schema-module "a" matches structure in database "b" or try to run" |
|
|
db_setup.pl --dbkey=import --action=deploy |
|
504 |
|
|
505 |
=head3 Compare schema (structure diff) with database ... |
Compare schema (structure diff) with database ... |
506 |
|
|
507 |
... when issuing "db_setup.pl --dbkey=import --action=deploy" |
... when issuing "db_setup.pl --dbkey=import --action=deploy" |
508 |
on a database with an already deployed schema, use an additional "--update" then |
on a database with an already deployed schema, use an additional "--update" then |
531 |
automatically and this is believed to be the most common case under normal circumstances. |
automatically and this is believed to be the most common case under normal circumstances. |
532 |
|
|
533 |
|
|
534 |
=head2 Introduce some features: |
=head2 FEATURES |
535 |
|
|
536 |
- Get this stuff together with UML (Unified Modeling Language) and/or standards from ODMG. |
- Get this stuff together with UML (Unified Modeling Language) and/or standards from ODMG. |
537 |
- Make it possible to load/save schemas in XMI (XML Metadata Interchange), |
- Make it possible to load/save schemas in XMI (XML Metadata Interchange), |
539 |
Integrate/bundle this with a web-/html-based UML modeling tool or |
Integrate/bundle this with a web-/html-based UML modeling tool or |
540 |
some other interesting stuff like the "Co-operative UML Editor" from Uni Darmstadt. (web-/java-based) |
some other interesting stuff like the "Co-operative UML Editor" from Uni Darmstadt. (web-/java-based) |
541 |
- Enable Round Trip Engineering. Keep code and diagrams in sync. Don't annoy/bother the programmers. |
- Enable Round Trip Engineering. Keep code and diagrams in sync. Don't annoy/bother the programmers. |
542 |
- Add some more handlers: |
- Add support for some more handlers/locators to be able to |
543 |
- look at DBD::CSV, Text::CSV, XML::CSV, XML::Excel |
access the following standards/protocols/interfaces/programs/apis transparently: |
544 |
- Add some more locations/locators: |
+ DBD::CSV (via Data::Storage::Handler::DBI) |
545 |
- PerlDAV: http://www.webdav.org/perldav/ |
(-) Text::CSV, XML::CSV, XML::Excel |
546 |
- Move to t3, use InCASE |
- MAPI |
547 |
|
- LDAP |
548 |
|
- DAV (look at PerlDAV: http://www.webdav.org/perldav/) |
549 |
|
- Mbox (use formail for separating/splitting entries/nodes) |
550 |
|
- Cyrus (cyrdeliver - what about cyrretrieve (export)???) |
551 |
|
- use File::DiffTree, use File::Compare |
552 |
|
- Hibernate |
553 |
|
- "Win32::UserAccountDb" |
554 |
|
- "*nix::UserAccountDb" |
555 |
|
- .wab - files (Windows Address Book) |
556 |
|
- .pst - files (Outlook Post Storage?) |
557 |
|
- XML (e.g. via XML::Simple?) |
558 |
|
- Move to t3, look at InCASE |
559 |
|
- some kind of security layer for methods/objects |
560 |
|
- acls (stored via tangram/ldap?) for functions, methods and objects (entity- & data!?) |
561 |
|
- where are the hooks needed then? |
562 |
|
- is Data::Storage & Co. okay, or do we have to touch the innards of DBI and/or Tangram? |
563 |
|
- an attempt to start could be: |
564 |
|
- 'sub getACLByObjectId($id, $context)' |
565 |
|
- 'sub getACLByMethodname($id, $context)' |
566 |
|
- 'sub getACLByName($id, $context)' |
567 |
|
( would require a kinda registry to look up these very names pointing to arbitrary locations (code, data, ...) ) |
568 |
|
- add more hooks and various levels |
569 |
|
- better integrate introduced 'getObjectByGuid'-mechanism from Data::Storage::Handler::Tangram |
570 |
|
|
571 |
|
|
572 |
=head3 Links: |
=head3 LINKS / REFERENCES |
573 |
|
|
574 |
Specs: |
Specs: |
575 |
UML 1.3 Spec: http://cgi.omg.org/cgi-bin/doc?ad/99-06-08.pdf |
UML 1.3 Spec: http://cgi.omg.org/cgi-bin/doc?ad/99-06-08.pdf |