fixed some db queries.

Identified (but have not yet fixed) the lack of error handling when reading from CA.
hjp 2008-05-04 13:42:31 +00:00
parent 6ce3a6b0c5
commit 30512f5471
1 changed file with 19 additions and 11 deletions


@@ -404,39 +404,40 @@ sub db_record_version {
         where
             file_type=? and file_size=? and file_mtime=? and
             file_owner=? and file_group=? and file_acl=? and
-            file_unix_bits=? and
-            checksum=?
+            file_unix_bits=?
     ";
     my @args = (
         $f->{t}, $f->{s}, $f->{m},
         $f->{o}, $f->{g}, $f->{acl},
         join(',', map {$f->{$_} ? ($_) : ()} qw(setuid setgid sticky)),
-        $f->{checksum}
     );
-    if ($f->{t} eq 'b' || $f->{t} eq 'c') {
-        $query .= " and file_rdev=?";
-        push @args, $f->{rdev};
-    }
-    if ($f->{t} eq 'l') {
+    if ($f->{t} eq 'f') {
+        $query .= " and checksum=?";
+        push @args, $f->{checksum};
+    } elsif ($f->{t} eq 'l') {
         $query .= " and file_linktarget=?";
         push @args, $f->{lt};
+    } elsif ($f->{t} eq 'b' || $f->{t} eq 'c') {
+        $query .= " and file_rdev=?";
+        push @args, $f->{rdev};
     }
     my $version_id = $self->{dbh}->selectrow_array($query, {}, @args);
     unless ($version_id) {
+        # XXX why is $f->{checksum} undef here for ./bin/dash?
         $self->{dbh}->do("insert into versions2(
                 file_type, file_size, file_mtime,
                 file_owner, file_group, file_acl,
                 file_unix_bits,
                 file_rdev,
-                checksum, file_linktarget,
+                checksum, file_linktarget
             )
             values(
                 ?, ?, ?,
                 ?, ?, ?,
                 ?,
                 ?,
-                ?, ?,
+                ?, ?
             )",
             {},
             $f->{t}, $f->{s}, $f->{m},
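After this hunk, the version lookup is built per file type: only regular files are matched on their checksum, symlinks on their link target, and block/character devices on their device number. Reconstructed from the new side of the hunk above (variable names as in the original, surrounding query setup unchanged), the resulting code reads roughly:

    # Append the type-specific predicate and its bind value.
    if ($f->{t} eq 'f') {                          # regular file
        $query .= " and checksum=?";
        push @args, $f->{checksum};
    } elsif ($f->{t} eq 'l') {                     # symlink
        $query .= " and file_linktarget=?";
        push @args, $f->{lt};
    } elsif ($f->{t} eq 'b' || $f->{t} eq 'c') {   # block/character device
        $query .= " and file_rdev=?";
        push @args, $f->{rdev};
    }
    my $version_id = $self->{dbh}->selectrow_array($query, {}, @args);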
@@ -512,13 +513,14 @@ and it is more likely that we can link to new copies than to old ones.
 sub linkdup {
     my ($self, $f, $backup_filename) = @_;
     # XXX
-    my $sth = $self->{dbh}->prepare("select * from versions2, instances, files
+    my $sth = $self->{dbh}->prepare("select * from versions2, instances, files, sessions
         where file_type=? and file_size=? and file_mtime=?
         and file_owner=? and file_group=? and file_acl=?
         and file_unix_bits=?
         and checksum=? and online=1
         and instances.file=files.id
         and versions2.id=instances.version
+        and instances.session=sessions.id
         order by instances.session desc
     ");
     $sth->execute(
@@ -628,6 +630,12 @@ sub store_file {
             }
         } else {
             $self->log(5, "unexpected header $header\n");
+            # How can we recover here?
+            # Kill connection so that a new one is created for the next file
+            # Return failure so that this file is not recorded as done
+            #
+            # same problem for other failures in this method.
+            die "unexpected header $header";
         }
     }
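The comments added here only describe the intended recovery; the code still dies. A possible sketch of that recovery, dropping the connection so a new one is created for the next file and returning failure so the file is not recorded as done, is shown below. The $self->{conn} slot and the caller's handling of a false return value are assumptions, not part of the original code:

    } else {
        $self->log(5, "unexpected header $header\n");
        # Hypothetical recovery instead of die():
        # discard the connection so a fresh one is opened for the next file ...
        delete $self->{conn};   # assumed slot holding the connection
        # ... and report failure so this file is not recorded as done.
        return 0;
    }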