mkdir_p problem

hjp 2008-06-20 06:19:18 +00:00
parent 30512f5471
commit 2edaa67ccc
2 changed files with 37 additions and 10 deletions

Notes

@@ -67,3 +67,22 @@ exit if disk full
 On my 800 MHz PIII, the CPU usage is rather high. Some profiling seems
 to be necessary (or I should get a faster backup server :-)).
+mkdir_p doesn't report the real reason for a failure:
+mkdir_p('/backup/2008-06-20T08.10.56/zeno.hjp.at/.', 777)
+mkdir_p('/backup/2008-06-20T08.10.56/zeno.hjp.at', 777)
+mkdir_p('/backup/2008-06-20T08.10.56', 777)
+failed: Read-only file system
+cannot mkdir /backup/2008-06-20T08.10.56/zeno.hjp.at/.: No such file or directory at /usr/local/share/perl/5.8.8/Simba/CA.pm line 180, <GEN1> line 1.
+The real reason is "Read-only file system", but after mkdir_p returns,
+$! is "No such file or directory"; a guess at why is sketched below.
+(And anyway, Simba::CA::backup2disk shouldn't just die, but should
+write a message to the log file first; that's a different problem.)
+Ideas (both sketched below):
+* Check if File::Path behaves better.
+* Die on error and let the caller catch the error.
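
A guess at why $! ends up as "No such file or directory": the usual
recursive mkdir_p creates the parents first and then retries the leaf
mkdir unconditionally. On a read-only filesystem the parent mkdir fails
with EROFS, and the retried leaf mkdir then fails with ENOENT, which
overwrites $!. A minimal sketch of that pattern (assumed, not taken from
Simba's actual mkdir_p):

    # Hypothetical recursive mkdir_p, showing how the errno gets clobbered.
    sub mkdir_p {
        my ($dir, $mode) = @_;
        return 1 if -d $dir;
        (my $parent = $dir) =~ s{/+[^/]*/*$}{};
        # If this fails with EROFS, we fall through anyway ...
        mkdir_p($parent, $mode) if length $parent;
        # ... and this retry fails with ENOENT (parent still missing),
        # so the caller only ever sees "No such file or directory" in $!.
        return mkdir($dir, $mode);
    }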
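
On the ideas themselves: File::Path's mkpath (the interface shipped with
Perl 5.8.8) croaks on the first mkdir that fails, with the failing path
and the original $! in the message, so checking it covers both points at
once: the real reason survives, and the caller can catch the error and
log it instead of dying. A rough sketch, reusing $self, $f and $success
from the backup2disk hunk below:

    use File::Path qw(mkpath);

    my $d = "$self->{this_backup}/$f->{name}";
    eval { mkpath($d, 0, 0777) };   # args: path, verbose flag, (octal) mode
    if ($@) {
        # $@ reads e.g. "mkdir /backup/2008-06-20T08.10.56: Read-only file system"
        $self->log(5, "cannot create $d: $@");
        $success = 0;               # skip db_record_version for this file
    }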

CA.pm

@@ -171,8 +171,9 @@ sub backup2disk {
     chomp;
     my $f = $self->parse($_);
+    my $success = 1;
     if ($f->{t} eq 'f') {
-        $self->store_file($f);
+        $success = $self->store_file($f);
     } elsif ($f->{t} eq 'd') {
         my $d = "$self->{this_backup}/$f->{name}";
         $d =~ s,//+,/,g;
@@ -190,7 +191,7 @@ sub backup2disk {
             $self->log(5, "ignored $_\n");
         }
         # insert into DB.
-        $self->db_record_version($target, $f);
+        $self->db_record_version($target, $f) if ($success);
     }
     $self->close_session();
     $self->log(3, "finished backup for target host " . $target->{host} . " dir " . $target->{dir} . ": $count files");
@@ -473,6 +474,12 @@ sub new_session {
 sub close_session {
     my ($self) = @_;
     $self->{dbh}->do("update sessions set end_date=? where id=?", {}, time(), $self->{session_id});
+    $self->close_file_connection;
+    delete $self->{target};
+}
+
+sub close_file_connection {
+    my ($self) = @_;
     if ($self->{file_pid}) {
         close($self->{file_cfd});
         close($self->{file_dfd});
@@ -484,7 +491,6 @@ sub close_session {
         delete $self->{file_dfd};
         delete $self->{file_pid};
     }
-    delete $self->{target};
 }

 sub get_last_session_id {
@@ -567,10 +573,12 @@ can be created to an existing file, create a new one.
 sub store_file {
     my ($self, $f) = @_;
+    my $success = 1;
     if($self->present($f)) {
         if (link("$self->{last_backup}/$f->{name}", "$self->{this_backup}/$f->{name}")) {
             $self->log(10, "linked");
-            return;
+            return $success;
         } else {
             $self->log(5, "cannot link $self->{last_backup}/$f->{name} to $self->{this_backup}/$f->{name}: $!");
         }
@@ -615,6 +623,7 @@ sub store_file {
     $trailer = $self->{file_dfd}->getline;
     if ($trailer =~ /^fail /) {
         $self->log(5, $trailer);
+        $success = 0;
     } elsif ($trailer =~ /^chk sha1 (\w+)/) {
         my $checksum = $sha1->hexdigest;
         if ($checksum ne $1) {
@@ -623,6 +632,8 @@ sub store_file {
         $f->{checksum} = $checksum;
     } else {
         $self->log(5, "unexpected trailer $trailer\n");
+        $self->close_file_connection;
+        $success = 0;
     }
     unless ($self->linkdup($f, $backup_filename)) {
         $self->setmeta($f);
@@ -630,13 +641,10 @@ sub store_file {
         }
     } else {
         $self->log(5, "unexpected header $header\n");
-        # How can we recover here?
-        # Kill connection so that a new one is created for the next file
-        # Return failure so that this file is not recorded as done
-        #
-        # same problem for other failures in this method.
-        die "unexpected header $header";
+        $self->close_file_connection;
+        $success = 0;
     }
+    return $success;
 }

 sub DESTROY {
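
The hunks above handle the failures store_file detects itself; the
parenthetical in the Notes (backup2disk shouldn't just die) still applies
to anything further down that calls die. A hedged sketch of guarding the
per-file call, with the same variables as the backup2disk hunk above:

    my $success = eval { $self->store_file($f) };
    unless (defined $success) {
        # store_file (or something it called) died: log the reason and
        # treat it like any other failure, so db_record_version is skipped.
        $self->log(5, "store_file died: $@");
        $success = 0;
    }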