mkdir_p problem
parent 30512f5471
commit 2edaa67ccc
@@ -67,3 +67,22 @@ exit if disk full
 
 On my 800 MHz PIII, the CPU usage is rather high. Some profiling seems
 to be necessary (or I should get a faster backup server :-)).
+
+
+mkdir_p doesn't report the real reason of a failure:
+
+mkdir_p('/backup/2008-06-20T08.10.56/zeno.hjp.at/.', 777)
+mkdir_p('/backup/2008-06-20T08.10.56/zeno.hjp.at', 777)
+mkdir_p('/backup/2008-06-20T08.10.56', 777)
+failed: Read-only file system
+cannot mkdir /backup/2008-06-20T08.10.56/zeno.hjp.at/.: No such file or directory at /usr/local/share/perl/5.8.8/Simba/CA.pm line 180, <GEN1> line 1.
+
+The real reason is "Read-only file system" but after mkdir_p returns,
+$! is "No such file or directory". (and anyway Simba::CA::backup2disk
+shouldn't just die, but write a message to the log file first, but
+that's a different problem)
+
+Ideas:
+
+* Check if File::Path behaves better.
+* Die on error and let caller catch the error.
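The second idea ("die on error and let caller catch the error") could look roughly like the sketch below. This is only an illustration, not Simba's actual mkdir_p: the helper name mkdir_p_or_die, the recursion via File::Basename::dirname, and the caller shown are assumptions. The point is that dying at the first mkdir that genuinely fails preserves "Read-only file system", whereas the current helper apparently retries the deeper mkdirs on the way back up and leaves "No such file or directory" in $!.

    use strict;
    use warnings;
    use File::Basename qw(dirname);

    # Hypothetical replacement for mkdir_p: create the parents first and die
    # with the errno of the first mkdir that genuinely fails.
    sub mkdir_p_or_die {
        my ($dir, $mode) = @_;
        return if -d $dir;                      # already there, nothing to do
        mkdir_p_or_die(dirname($dir), $mode);   # make sure the parent exists
        mkdir($dir, $mode)
            or -d $dir                          # another process created it meanwhile
            or die "cannot mkdir $dir: $!\n";   # e.g. "Read-only file system"
    }

    # The caller catches the error and logs the real reason instead of
    # looking at $! after the helper has returned:
    eval { mkdir_p_or_die('/backup/2008-06-20T08.10.56/zeno.hjp.at', 0777) };
    if ($@) {
        warn "backup directory not created: $@";
    }

As for the first idea: the File::Path that ships with Perl 5.8.8 creates parents top-down and croaks with the underlying errno when a mkdir fails, so wrapping mkpath in an eval should surface the real reason as well (worth verifying against that version, though).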
@@ -171,8 +171,9 @@ sub backup2disk {
         chomp;
         my $f = $self->parse($_);
 
+        my $success = 1;
         if ($f->{t} eq 'f') {
-            $self->store_file($f);
+            $success = $self->store_file($f);
         } elsif ($f->{t} eq 'd') {
             my $d = "$self->{this_backup}/$f->{name}";
             $d =~ s,//+,/,g;
@@ -190,7 +191,7 @@ sub backup2disk {
             $self->log(5, "ignored $_\n");
         }
         # insert into DB.
-        $self->db_record_version($target, $f);
+        $self->db_record_version($target, $f) if ($success);
     }
     $self->close_session();
     $self->log(3, "finished backup for target host " . $target->{host} . " dir " . $target->{dir} . ": $count files");
@@ -473,6 +474,12 @@ sub new_session {
 sub close_session {
     my ($self) = @_;
     $self->{dbh}->do("update sessions set end_date=? where id=?", {}, time(), $self->{session_id});
+    $self->close_file_connection;
+    delete $self->{target};
+}
+
+sub close_file_connection {
+    my ($self) = @_;
     if ($self->{file_pid}) {
         close($self->{file_cfd});
         close($self->{file_dfd});
@@ -484,7 +491,6 @@ sub close_session {
         delete $self->{file_dfd};
         delete $self->{file_pid};
     }
-    delete $self->{target};
 }
 
 sub get_last_session_id {
@@ -567,10 +573,12 @@ can be created to an existing file, create a new one.
 sub store_file {
     my ($self, $f) = @_;
 
+    my $success = 1;
+
     if($self->present($f)) {
         if (link("$self->{last_backup}/$f->{name}", "$self->{this_backup}/$f->{name}")) {
             $self->log(10, "linked");
-            return;
+            return $success;
         } else {
             $self->log(5, "cannot link $self->{last_backup}/$f->{name} to $self->{this_backup}/$f->{name}: $!");
         }
@@ -615,6 +623,7 @@ sub store_file {
         $trailer = $self->{file_dfd}->getline;
         if ($trailer =~ /^fail /) {
             $self->log(5, $trailer);
+            $success = 0;
         } elsif ($trailer =~ /^chk sha1 (\w+)/) {
             my $checksum = $sha1->hexdigest;
             if ($checksum ne $1) {
@@ -623,6 +632,8 @@ sub store_file {
             $f->{checksum} = $checksum;
         } else {
             $self->log(5, "unexpected trailer $trailer\n");
+            $self->close_file_connection;
+            $success = 0;
         }
         unless ($self->linkdup($f, $backup_filename)) {
             $self->setmeta($f);
@@ -630,13 +641,10 @@ sub store_file {
         }
     } else {
         $self->log(5, "unexpected header $header\n");
-        # How can we recover here?
-        # Kill connection so that a new one is created for the next file
-        # Return failure so that this file is not recorded as done
-        #
-        # same problem for other failures in this method.
-        die "unexpected header $header";
+        $self->close_file_connection;
+        $success = 0;
     }
+    return $success;
 }
 
 sub DESTROY {