# $Id$
#
# $Log$
# Revision 1.12 2003/01/30 22:28:21 joko
# + implemented new concrete methods
#
# Revision 1.11 2002/12/19 16:31:05 joko
# + sub dropDb
# + sub rebuildDb
#
# Revision 1.10 2002/12/15 02:02:22 joko
# + fixed logging-message
#
# Revision 1.9 2002/12/05 07:58:20 joko
# + now using Tie::SecureHash as a base for the COREHANDLE
# + former public COREHANDLE becomes private _COREHANDLE now
#
# Revision 1.8 2002/12/01 22:20:43 joko
# + sub createDb (from Storage.pm)
#
# Revision 1.7 2002/12/01 07:09:09 joko
# + sub getListFiltered (dummy redirecting to getListUnfiltered)
#
# Revision 1.6 2002/12/01 04:46:01 joko
# + sub eraseAll
#
# Revision 1.5 2002/11/29 05:00:26 joko
# + sub getListUnfiltered
# + sub sendQuery
#
# Revision 1.4 2002/11/17 08:46:42 jonen
# + wrapped eval around DBI->connect to prevent deaths
#
# Revision 1.3 2002/11/17 06:34:39 joko
# + locator metadata can now be reached via ->{locator}
# - sub hash2sql now taken from libdb
#
# Revision 1.2 2002/10/25 11:43:27 joko
# + enhanced robustness
# + more logging for debug-levels
#
# Revision 1.1 2002/10/10 03:44:07 cvsjoko
# + new
#
use base ("Data::Storage::Handler::Abstract");

use DBI;
use Data::Dumper;

# project helpers: dsn parsing and generic hash->SQL rendering
use libdb qw( getDbNameByDsn hash2Sql );
use Data::Storage::Result::DBI;

# get logger instance (shared, file-lexical)
my $logger = Log::Dispatch::Config->instance;
# Describe this handler for generic callers: currently only announces
# which method a consumer should invoke to disconnect the handle.
sub getMetaInfo {
    my $self = shift;
    $logger->debug( __PACKAGE__ . "->getMetaInfo()" );
    my %info = ( 'disconnectMethod' => 'disconnect' );
    return \%info;
}
# Establish the DBI connection described by the locator metadata.
#
# Reads the dsn from $self->{locator}->{dbi}->{dsn} and passes the whole
# dbi-hash as DBI connect attributes. The eval guards against DBI dying
# during connect (see revision log: "wrapped eval around DBI->connect to
# prevent deaths").
#
# Returns 1 on success, nothing on failure.
# Bugfix: a failed connect previously still set
# $self->{locator}->{status}->{connected} = 1 and returned 1, because the
# "return;" inside the eval only left the eval block, not this sub.
sub connect {
    my $self = shift;

    # create handle
    if ( my $dsn = $self->{locator}->{dbi}->{dsn} ) {
        $logger->debug( __PACKAGE__ . "->connect( dsn $dsn )" );

        eval {
            $self->{_COREHANDLE} = DBI->connect( $dsn, '', '', $self->{locator}->{dbi} );
        };
        $logger->warning( __PACKAGE__ . "->connect failed: " . $@ ) if $@;

        if (!$self->{_COREHANDLE}) {
            $logger->warning( __PACKAGE__ . "->connect failed: " . DBI::errstr ) if !$@;
            # do not flag the locator as connected when no handle exists
            return;
        }
    }

    $self->configureCOREHANDLE();

    $self->{locator}->{status}->{connected} = 1;

    return 1;
}
# Apply locator-configured DBI attributes (trace, RaiseError, PrintError,
# HandleError) to the private core handle. No-op when not connected.
# NOTE(review): the opening "sub configureCOREHANDLE {" line was not visible
# in the reviewed hunk; the name is taken from the call in connect() and the
# debug message below -- confirm against the full file.
sub configureCOREHANDLE {
    my $self = shift;

    $logger->debug( __PACKAGE__ . "->configureCOREHANDLE" );

    return if !$self->{_COREHANDLE};

    my $dbi  = $self->{locator}->{dbi};
    my $core = $self->{_COREHANDLE};

    # apply configured modifications to DBI-handle
    if (exists $dbi->{trace_level} && exists $dbi->{trace_file}) {
        $core->trace($dbi->{trace_level}, $dbi->{trace_file});
    }
    foreach my $attr (qw(RaiseError PrintError HandleError)) {
        $core->{$attr} = $dbi->{$attr} if exists $dbi->{$attr};
    }
}
# Prepare and execute a raw SQL string, connecting on demand.
# Returns the executed statement handle, or nothing when no connection
# could be established or the statement could not be prepared.
sub _sendSql {
    my $self = shift;
    my $sql = shift;

    # two-level handling for implicit connect:
    # if there's no corehandle ...
    if (!$self->{_COREHANDLE}) {
        # ... try to connect, but ...
        $self->connect();
        # ... if this still fails, there's something wrong probably, so we won't continue
        if (!$self->{_COREHANDLE}) {
            return;
        }
    }

    #print "prepare sql: $sql\n";

    my $sth = $self->{_COREHANDLE}->prepare($sql);
    # bugfix: prepare() returns undef on failure when RaiseError is off;
    # calling execute() on that would die with an unrelated error
    return if !$sth;
    $sth->execute();
    return $sth;
}
# Execute a SQL command and wrap the statement handle in a
# Data::Storage::Result::DBI object.
# Returns the result object, or nothing when the command could not be
# prepared/executed (callers such as getChildNodes already truth-test this).
sub sendCommand {
    my $self = shift;
    my $command = shift;
    # TODO: when tracing: yes, do actually log this
    #$logger->debug( __PACKAGE__ . "->sendCommand( command $command )" );
    my $cmdHandle = $self->_sendSql($command);
    # bugfix: don't wrap a missing statement handle in a result object
    return if !$cmdHandle;
    my $result = Data::Storage::Result::DBI->new( RESULTHANDLE => $cmdHandle );
    return $result;
}
# List this storage's child nodes - for a DBI backend these are the table
# names of the current database, obtained via "SHOW TABLES".
# Returns a reference to a (possibly empty) list of table names.
# NOTE(review): "Tables_in_<dbname>" is the MySQL column label for SHOW
# TABLES output - presumably this handler targets MySQL; confirm.
sub getChildNodes {
    my $self = shift;
    my @tables;
    $logger->debug( __PACKAGE__ . "->getChildNodes()" );
    my $locator = $self->{locator};
    #print Dumper($locator); exit;
    my $result = $self->sendCommand( 'SHOW TABLES;' );
    if ($result) {
        my $dbname = getDbNameByDsn( $locator->{dbi}->{dsn} );
        my $column = "Tables_in_$dbname";
        while ( my $row = $result->getNextEntry() ) {
            push @tables, $row->{$column};
        }
    }
    return \@tables;
}
# Fetch every row of the table named $nodename as a list of hash-refs.
# NOTE(review): $nodename is interpolated into the SQL unescaped - callers
# must pass trusted table names only.
sub getListUnfiltered {
    my $self     = shift;
    my $nodename = shift;
    $logger->debug( __PACKAGE__ . "->getListUnfiltered( nodename => '" . $nodename . "' )" );
    # get list of rows from rdbms by table name
    my $result = $self->sendCommand("SELECT * FROM $nodename");
    my @list;
    while ( my $row = $result->getNextEntry() ) {
        push @list, $row;
    }
    return \@list;
}
# Translate an abstract query description into SQL and execute it.
#   $query->{node}      - table name
#   $query->{criterias} - list of { key, op, val } hashes; 'eq' is the only
#                         operator mapped so far (others degrade as before)
#   $query->{subnodes}  - list of columns to select
# Returns the command result (see sendCommand).
sub sendQuery {
    my $self = shift;
    my $query = shift;

    $logger->debug( __PACKAGE__ . "->sendQuery" );

    my @crits;
    foreach my $criteria (@{$query->{criterias}}) {
        # only 'eq' is implemented; unknown ops keep an empty operator,
        # exactly as before
        my $op = (lc $criteria->{op} eq 'eq') ? '=' : '';
        push @crits, "$criteria->{key}$op'$criteria->{val}'";
    }

    # idiom fix: build the subnode set with a plain loop
    # (previously a map in void context)
    my $subnodes = {};
    $subnodes->{$_}++ foreach @{$query->{subnodes}};

    # HACK: this is hardcoded ;( expand possibilities!
    my $crit = join(' AND ', @crits);
    my $sql = hash2Sql($query->{node}, $subnodes, 'SELECT', $crit);

    return $self->sendCommand($sql);
}
# Remove all rows from the table backing $classname (DELETE without WHERE).
# NOTE(review): $classname is interpolated unescaped - trusted input only.
sub eraseAll {
    my $self      = shift;
    my $classname = shift;
    $logger->debug( __PACKAGE__ . "->eraseAll" );
    my $sql = "DELETE FROM $classname";
    $self->sendCommand($sql);
}
# TODO: actually implement the filtering functionality using $this->sendQuery
# Until then this simply delegates to getListUnfiltered, ignoring any filter.
sub getListFiltered {
    my ($self, $nodename) = @_;
    return $self->getListUnfiltered($nodename);
}
# TODO: do this via a parametrized "$self->connect(<connect just to database server - don't select database>)"
# Create the database named in the locator's dsn: connect to the bare
# server (dsn stripped of its "database=...;" part), issue CREATE DATABASE,
# disconnect. Returns 1 on success, nothing otherwise.
sub createDb {
    my $self = shift;

    # get dsn from Data::Storage::Locator instance
    my $dsn = $self->{locator}->{dbi}->{dsn};

    $logger->debug( __PACKAGE__ . "->createDb( dsn $dsn )" );

    # remove database setting from dsn-string and remember the extracted
    # database name to know what actually to create right now.
    # bugfix: only trust $1 when the substitution actually matched -
    # capture variables retain the value of the last successful match, so
    # a stale $1 could otherwise name the wrong database
    my $database_name;
    if ($dsn =~ s/database=(.+?);//) {
        $database_name = $1;
    }

    # flag to indicate goodness
    my $ok;

    # connect to database server - don't select/use any specific database
    #if ( my $dbh = DBI->connect($dsn, '', '', { PrintError => 0 } ) ) {
    if ( my $dbh = DBI->connect($dsn, '', '', $self->{locator}->{dbi} ) ) {

        if ($database_name) {
            if ($dbh->do("CREATE DATABASE $database_name")) {
                $ok = 1;
            }
        }

        $dbh->disconnect();

    }

    return $ok;
}
|
# Accessor for the private core DBI handle.
# NOTE(review): named "...2" - presumably to avoid clashing with an accessor
# in the base class; confirm before any rename.
sub getCOREHANDLE2 {
    my ($self) = @_;
    return $self->{_COREHANDLE};
}
# Drop the database named in the locator's dsn. Mirrors createDb: connect
# to the bare server (no database selected), DROP DATABASE, disconnect.
# Returns 1 on success, nothing otherwise.
sub dropDb {
    my $self = shift;
    my $dsn = $self->{locator}->{dbi}->{dsn};

    $logger->debug( __PACKAGE__ . "->dropDb( dsn $dsn )" );

    # bugfix: only trust $1 when the substitution actually matched -
    # a stale $1 from an earlier match could otherwise DROP the wrong
    # database, which is irreversible
    my $database_name;
    if ($dsn =~ s/database=(.+?);//) {
        $database_name = $1;
    }

    my $ok;

    if ( my $dbh = DBI->connect($dsn, '', '', {
        PrintError => 0,
    } ) ) {

        if ($database_name) {
            if ($dbh->do("DROP DATABASE $database_name;")) {
                $ok = 1;
            }
        }

        $dbh->disconnect();

    }

    return $ok;
}
# Drop and re-create the database. Schema retreat/deploy steps are still
# commented out pending implementation.
# Returns 1 only if every performed step succeeded, 0 otherwise.
sub rebuildDb {
    my $self = shift;
    $logger->info( __PACKAGE__ . "->rebuildDb()" );

    # sum up results (bool (0/1)) in array
    my @results;
    #push @results, $self->retreatSchema();
    push @results, $self->dropDb();
    push @results, $self->createDb();
    #push @results, $self->deploySchema();

    # idiom fix: scan the array for "bad ones" with grep
    # (previously a map in void context mutating a flag)
    my $res = (grep { !$_ } @results) ? 0 : 1;
    return $res;
}
# Probe the dsn and record the outcome in the locator's status hash.
# Returns the probe result (see testDsn).
sub testAvailability {
    my ($self) = @_;
    my $status = $self->testDsn();
    $self->{locator}->{status}->{available} = $status;
    return $status;
}
|
# Try a throwaway DBI connection against the locator's dsn.
# Returns 1 when the connect succeeds; logs the DBI error and returns
# nothing when it fails.
sub testDsn {
    my $self = shift;
    my $dsn = $self->{locator}->{dbi}->{dsn};
    if ( my $dbh = DBI->connect($dsn, '', '', {
        PrintError => 0,
    } ) ) {

        # TODO: REVIEW
        $dbh->disconnect();

        return 1;
    } else {
        $logger->warning( __PACKAGE__ . "[$self->{locator}->{type}]" . "->testDsn(): " . "DBI-error: " . $DBI::errstr );
    }
    # bugfix: explicit false result on failure - the sub previously fell
    # off the end, returning whatever $logger->warning() evaluated to;
    # also dropped the unused "my $result" declaration
    return;
}
1;  # module must return a true value

__END__