|
From: <gi...@ba...> - 2012-11-09 19:37:29
|
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Bacula Community source".
The branch, Branch-5.2 has been updated
via aba06a622caf5a50da1e96425f0d1be1e7ad11bf (commit)
via 0d3537772147b13e7a3ce7cf8722a56820b33f51 (commit)
via 7a7806b7c33f4bdbbd4fd5196ecce9f9765d8e46 (commit)
via d2c6005a09caf7f4edf40e929c5d24d67e077024 (commit)
via 3e54fae00b19359c4b92baddc652a011a13f268d (commit)
via 03fb2c48a9ee5b2f6806ca9de8ca0ad9316f1b8a (commit)
via ade1120c8d9767ea787a1a7679470d1acf2a8be5 (commit)
via 47ddec05e9bcd8b72882acc204f7d424b948c123 (commit)
from 8e292f03a65c496ef17ec23b91f42c0e1ac5bea6 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
- Log -----------------------------------------------------------------
commit aba06a622caf5a50da1e96425f0d1be1e7ad11bf
Author: Marco van Wieringen <mv...@pl...>
Date: Thu Nov 8 10:10:40 2012 +0100
Fix bug #1948 MailOnSuccess get executed on error.
MailOnSuccess running when Job is cancelled or ends with errors.
commit 0d3537772147b13e7a3ce7cf8722a56820b33f51
Author: Eric Bollengier <er...@ba...>
Date: Wed Nov 7 16:03:46 2012 +0100
Fix missing index on Media table
commit 7a7806b7c33f4bdbbd4fd5196ecce9f9765d8e46
Author: Marco van Wieringen <mv...@pl...>
Date: Tue Nov 6 15:22:26 2012 +0100
Fix bug #1943 no message storage on closed database connection.
Bacula director crashes if it reaches the maximum number of connections to
the MySQL server (or any other database), because it tries to store the
failure to connect to the database in the database itself, over a
non-connected database connection. Things like mysql_real_escape_string,
which is used in that code path, depend on a connected database
connection which we don't have.
commit d2c6005a09caf7f4edf40e929c5d24d67e077024
Author: Marco van Wieringen <mv...@pl...>
Date: Tue Nov 6 14:29:19 2012 +0100
Fix bug #1946 Fix problem with MySQL with big Base jobs.
This patch adds an index on the temporary table "basefilesXXX",
where XXX is the JobId. This index heavily reduces the time for full
backups with big BaseJobs with bacula 5.2 and MySQL backend.
commit 3e54fae00b19359c4b92baddc652a011a13f268d
Author: Eric Bollengier <er...@ba...>
Date: Fri Nov 2 10:08:06 2012 +0100
Fix #5346 .bvfs_lsfiles and .bvfs_restore to handle deleted files
commit 03fb2c48a9ee5b2f6806ca9de8ca0ad9316f1b8a
Author: Eric Bollengier <er...@ba...>
Date: Thu Nov 1 21:42:04 2012 +0100
regress: Add test for bvfs-test with deleted files
commit ade1120c8d9767ea787a1a7679470d1acf2a8be5
Author: Eric Bollengier <er...@ba...>
Date: Thu Nov 1 20:40:40 2012 +0100
Fix spooldata, accurate and ignoreduplicate run arguments
When using "run accurate=yes job=MyJob yes", the job was not
using the accurate option. This is now fixed.
commit 47ddec05e9bcd8b72882acc204f7d424b948c123
Author: Kern Sibbald <ke...@si...>
Date: Sun Oct 21 10:35:03 2012 +0200
Add remote regress test script
-----------------------------------------------------------------------
Summary of changes:
diff --git a/bacula/src/cats/bvfs.c b/bacula/src/cats/bvfs.c
index 10b283e..27f76d5 100644
--- a/bacula/src/cats/bvfs.c
+++ b/bacula/src/cats/bvfs.c
@@ -1020,8 +1020,8 @@ bool Bvfs::compute_restore_list(char *fileid, char *dirid, char *hardlink,
goto bail_out;
}
- /* TODO: handle basejob and SQLite3 */
- Mmsg(query, sql_bvfs_select[db_get_type_index(db)], output_table, output_table);
+ Mmsg(query, sql_bvfs_select[db_get_type_index(db)],
+ output_table, output_table, output_table);
/* TODO: handle jobid filter */
Dmsg1(dbglevel_sql, "q=%s\n", query.c_str());
diff --git a/bacula/src/cats/make_postgresql_tables.in b/bacula/src/cats/make_postgresql_tables.in
index cb8d58d..9e57088 100644
--- a/bacula/src/cats/make_postgresql_tables.in
+++ b/bacula/src/cats/make_postgresql_tables.in
@@ -219,7 +219,8 @@ CREATE TABLE media
primary key (mediaid)
);
-create unique index media_volumename_id on media (volumename);
+CREATE UNIQUE INDEX media_volumename_id ON Media (VolumeName);
+CREATE INDEX media_poolid_idx ON Media (PoolId);
CREATE TABLE MediaType (
diff --git a/bacula/src/cats/sql_cmds.c b/bacula/src/cats/sql_cmds.c
index 860def1..6ebcb62 100644
--- a/bacula/src/cats/sql_cmds.c
+++ b/bacula/src/cats/sql_cmds.c
@@ -465,7 +465,8 @@ const char *create_temp_basefile[] = {
/* Mysql */
"CREATE TEMPORARY TABLE basefile%lld ("
"Path BLOB NOT NULL,"
- "Name BLOB NOT NULL)",
+ "Name BLOB NOT NULL,"
+ "INDEX (Path(255), Name(255)))",
/* Postgresql */
"CREATE TEMPORARY TABLE basefile%lld ("
@@ -817,17 +818,27 @@ const char *sql_get_max_connections[] = {
"SELECT 0"
};
-/* TODO: Check for corner cases with MySQL and SQLite3
+/*
* The Group By can return strange numbers when having multiple
* version of a file in the same dataset.
*/
+const char *default_sql_bvfs_select =
+"CREATE TABLE %s AS "
+"SELECT File.JobId, File.FileIndex, File.FileId "
+"FROM Job, File, ( "
+ "SELECT MAX(JobTDate) AS JobTDate, PathId, FilenameId "
+ "FROM btemp%s GROUP BY PathId, FilenameId "
+ ") AS T1 JOIN Filename USING (FilenameId) "
+"WHERE T1.JobTDate = Job.JobTDate "
+ "AND Job.JobId = File.JobId "
+ "AND T1.PathId = File.PathId "
+ "AND T1.FilenameId = File.FilenameId "
+ "AND File.FileIndex > 0 "
+ "AND Job.JobId IN (SELECT DISTINCT JobId FROM btemp%s) ";
+
const char *sql_bvfs_select[] = {
/* Mysql */
- "CREATE TABLE %s AS ( "
- "SELECT JobId, FileIndex, FileId, max(JobTDate) as JobTDate "
- "FROM btemp%s "
- "GROUP BY PathId, FilenameId "
- "HAVING FileIndex > 0)",
+ default_sql_bvfs_select,
/* Postgresql */
"CREATE TABLE %s AS ( "
@@ -841,12 +852,7 @@ const char *sql_bvfs_select[] = {
"WHERE FileIndex > 0)",
/* SQLite3 */
- "CREATE TABLE %s AS "
- "SELECT JobId, FileIndex, FileId, max(JobTDate) as JobTDate "
- "FROM btemp%s "
- "GROUP BY PathId, FilenameId "
- "HAVING FileIndex > 0",
-
+ default_sql_bvfs_select,
/* Ingres (TODO) */
"SELECT 0"
@@ -867,13 +873,15 @@ static const char *sql_bvfs_list_files_default =
"JOIN File USING (FileId) "
"JOIN Job ON (BaseJobId = Job.JobId) "
"WHERE BaseFiles.JobId IN (%s) AND PathId = %s "
- ") AS tmp GROUP BY PathId, FilenameId LIMIT %lld OFFSET %lld"
+ ") AS tmp GROUP BY PathId, FilenameId "
+ "LIMIT %lld OFFSET %lld"
") AS T1 JOIN Filename USING (FilenameId) "
"WHERE T1.JobTDate = Job.JobTDate "
"AND Job.JobId = File.JobId "
"AND T1.PathId = File.PathId "
"AND T1.FilenameId = File.FilenameId "
"AND Filename.Name != '' "
+ "AND File.FileIndex > 0 "
" %s " /* AND Name LIKE '' */
"AND (Job.JobId IN ( "
"SELECT DISTINCT BaseJobId FROM BaseFiles WHERE JobId IN (%s)) "
@@ -886,20 +894,24 @@ const char *sql_bvfs_list_files[] = {
/* JobId PathId JobId PathId WHERE? Filename? Limit Offset*/
/* Postgresql */
- "SELECT DISTINCT ON (FilenameId) 'F', PathId, T.FilenameId, "
- "Filename.Name, JobId, LStat, FileId "
- "FROM "
- "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 "
- "FROM File WHERE JobId IN (%s) AND PathId = %s "
- "UNION ALL "
- "SELECT File.FileId, File.JobId, PathId, FilenameId, "
- "File.FileIndex, LStat, MD5 "
- "FROM BaseFiles JOIN File USING (FileId) "
- "WHERE BaseFiles.JobId IN (%s) AND File.PathId = %s "
- ") AS T JOIN Job USING (JobId) JOIN Filename USING (FilenameId) "
- " WHERE Filename.Name != '' "
- " %s " /* AND Name LIKE '' */
- "ORDER BY FilenameId, StartTime DESC LIMIT %lld OFFSET %lld",
+ "SELECT Type, PathId, FilenameId, Name, JobId, LStat, FileId "
+ "FROM ("
+ "SELECT DISTINCT ON (FilenameId) 'F' as Type, PathId, T.FilenameId, "
+ "Filename.Name, JobId, LStat, FileId, FileIndex "
+ "FROM "
+ "(SELECT FileId, JobId, PathId, FilenameId, FileIndex, LStat, MD5 "
+ "FROM File WHERE JobId IN (%s) AND PathId = %s "
+ "UNION ALL "
+ "SELECT File.FileId, File.JobId, PathId, FilenameId, "
+ "File.FileIndex, LStat, MD5 "
+ "FROM BaseFiles JOIN File USING (FileId) "
+ "WHERE BaseFiles.JobId IN (%s) AND File.PathId = %s "
+ ") AS T JOIN Job USING (JobId) JOIN Filename USING (FilenameId) "
+ " WHERE Filename.Name != '' "
+ " %s " /* AND Name LIKE '' */
+ "ORDER BY FilenameId, StartTime DESC "
+ ") AS A WHERE A.FileIndex > 0 "
+ "LIMIT %lld OFFSET %lld ",
/* SQLite */
sql_bvfs_list_files_default,
diff --git a/bacula/src/cats/update_postgresql_tables.in b/bacula/src/cats/update_postgresql_tables.in
index d737bcf..f97cb60 100644
--- a/bacula/src/cats/update_postgresql_tables.in
+++ b/bacula/src/cats/update_postgresql_tables.in
@@ -45,6 +45,9 @@ ALTER TABLE File ADD COLUMN DeltaSeq smallint default 0;
UPDATE Version SET VersionId=14;
COMMIT;
+set client_min_messages = fatal;
+CREATE INDEX media_poolid_idx on Media (PoolId);
+
ANALYSE;
END-OF-DATA
diff --git a/bacula/src/dird/ua_run.c b/bacula/src/dird/ua_run.c
index e9c563d..f66bdd5 100644
--- a/bacula/src/dird/ua_run.c
+++ b/bacula/src/dird/ua_run.c
@@ -208,24 +208,6 @@ int modify_job_parameters(UAContext *ua, JCR *jcr, run_ctx &rc)
{
int i, opt;
- /* Some options are not available through the menu
- * TODO: Add an advanced menu?
- */
- if (rc.spool_data_set) {
- jcr->spool_data = rc.spool_data;
- }
-
- if (rc.accurate_set) {
- jcr->accurate = rc.accurate;
- }
-
- /* Used by migration jobs that can have the same name,
- * but can run at the same time
- */
- if (rc.ignoreduplicatecheck_set) {
- jcr->IgnoreDuplicateJobChecking = rc.ignoreduplicatecheck;
- }
-
/*
* At user request modify parameters of job to be run.
*/
@@ -570,6 +552,25 @@ static bool reset_restore_context(UAContext *ua, JCR *jcr, run_ctx &rc)
jcr->RestoreJobId = str_to_int64(rc.jid);
rc.jid = 0;
}
+
+ /* Some options are not available through the menu
+ * TODO: Add an advanced menu?
+ */
+ if (rc.spool_data_set) {
+ jcr->spool_data = rc.spool_data;
+ }
+
+ if (rc.accurate_set) {
+ jcr->accurate = rc.accurate;
+ }
+
+ /* Used by migration jobs that can have the same name,
+ * but can run at the same time
+ */
+ if (rc.ignoreduplicatecheck_set) {
+ jcr->IgnoreDuplicateJobChecking = rc.ignoreduplicatecheck;
+ }
+
return true;
}
diff --git a/bacula/src/lib/message.c b/bacula/src/lib/message.c
index e2fd3e4..c510563 100644
--- a/bacula/src/lib/message.c
+++ b/bacula/src/lib/message.c
@@ -544,20 +544,39 @@ void close_msg(JCR *jcr)
if (!d->fd) {
break;
}
- if (
- (d->dest_code == MD_MAIL_ON_ERROR && jcr &&
- (jcr->JobStatus == JS_Terminated || jcr->JobStatus == JS_Warnings))
- ||
- (d->dest_code == MD_MAIL_ON_SUCCESS && jcr &&
- jcr->JobStatus == JS_ErrorTerminated)
- ) {
- goto rem_temp_file;
+
+ switch (d->dest_code) {
+ case MD_MAIL_ON_ERROR:
+ if (jcr) {
+ switch (jcr->JobStatus) {
+ case JS_Terminated:
+ case JS_Warnings:
+ goto rem_temp_file;
+ default:
+ break;
+ }
+ }
+ break;
+ case MD_MAIL_ON_SUCCESS:
+ if (jcr) {
+ switch (jcr->JobStatus) {
+ case JS_Terminated:
+ case JS_Warnings:
+ break;
+ default:
+ goto rem_temp_file;
+ }
+ }
+ break;
+ default:
+ break;
}
if (!(bpipe=open_mail_pipe(jcr, cmd, d))) {
Pmsg0(000, _("open mail pipe failed.\n"));
goto rem_temp_file;
}
+
Dmsg0(850, "Opened mail pipe\n");
len = d->max_len+10;
line = get_memory(len);
@@ -798,7 +817,7 @@ void dispatch_message(JCR *jcr, int type, utime_t mtime, char *msg)
switch (d->dest_code) {
case MD_CATALOG:
char ed1[50];
- if (!jcr || !jcr->db) {
+ if (!jcr || !jcr->db || !jcr->db->is_connected()) {
break;
}
if (p_sql_query && p_sql_escape) {
@@ -806,7 +825,7 @@ void dispatch_message(JCR *jcr, int type, utime_t mtime, char *msg)
POOLMEM *esc_msg = get_pool_memory(PM_MESSAGE);
int len = strlen(msg) + 1;
- esc_msg = check_pool_memory_size(esc_msg, len*2+1);
+ esc_msg = check_pool_memory_size(esc_msg, len * 2 + 1);
p_sql_escape(jcr, jcr->db, esc_msg, msg, len);
bstrutime(dt, sizeof(dt), mtime);
diff --git a/regress/rtest b/regress/rtest
new file mode 100755
index 0000000..b613b2f
--- /dev/null
+++ b/regress/rtest
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Run a regression test script on a remote machine
+#
+# called: rtest <machine> <branch> <test-script>
+#
+# Note: expects remote source to be in the same place
+# as defined in regress/config on your calling machine
+# Otherwise, you must define BACULA_SOURCE after the
+# . ./config
+
+. ./config
+
+if test $# != 3 ; then
+ echo "Called: rtest <machine> <branch> <test-script>"
+ exit 1
+fi
+
+host=$1
+branch=$2
+run=$3
+
+ssh ${host} <<EOF
+ cd ${BACULA_SOURCE}
+ # remove any modified version.h
+ git checkout src/version.h
+ git checkout ${branch}
+ git pull
+ cd ../regress
+ ${run}
+EOF
+# Get test results
+scp ${host}:${BACULA_SOURCE}/../regress/test.out rtest.out
+cat rtest.out
diff --git a/regress/tests/bvfs-test b/regress/tests/bvfs-test
index 49ed3d4..aabd9a3 100755
--- a/regress/tests/bvfs-test
+++ b/regress/tests/bvfs-test
@@ -137,56 +137,164 @@ END_OF_DATA
run_bconsole
sleep 1
-touch ${tmpsrc}/ficheriro1.txt
+cat /etc/hosts >> ${tmpsrc}/ficheriro1.txt
+rm -f ${tmpsrc}/dird_conf.c
touch ${tmpsrc}/ficheriro2.txt
+ln ${tmpsrc}/ficheriro2.txt ${tmpsrc}/hardlink2
cat <<END_OF_DATA >${tmp}/bconcmds
@$out /dev/null
messages
@$out ${tmp}/log1.out
-run level=Incremental job=$JobName yes
-wait
-messages
-@#
-@# now do a restore
-@#
-@$out ${tmp}/log2.out
-setdebug level=10 storage=File
-restore where=${tmp}/bacula-restores storage=File file=<${tmp}/restore-list
+run level=Incremental accurate=yes job=$JobName
yes
wait
-status storage=File
messages
@$out ${tmp}/log3.out
-setdebug level=10 director
.bvfs_update
-sql
-SELECT HasCache FROM Job WHERE Type='B';
+.bvfs_lsdir path=$tmpsrc/ jobid=1,2,3,4,5,6,7,8
+.bvfs_lsfile path=$tmpsrc/ jobid=1,2,3,4,5,6,7,8
+@$out ${tmp}/log31.out
+.bvfs_lsfile path=$tmpsrc/ jobid=1,2,3,4,5,6,7
+quit
+END_OF_DATA
+
+run_bconsole
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@$out $tmp/log4.out
+.bvfs_cleanup path=b21
+END_OF_DATA
+
+
+# we look in the lsdir/lsfiles output to get filenameid and pathid for version and restore
+awk '/ficheriro1.txt/ {
+ print ".bvfs_versions jobid=" $4 " fnid=" $2 " pathid=" $1 " client='$CLIENT'"
+ print ".bvfs_restore jobid=" $4 " fileid=" $3 " path=b21"
+} ' $tmp/log3.out >> $tmp/bconcmds
+
+cat <<END_OF_DATA >>${tmp}/bconcmds
+restore file="?b21" where=$tmp/bacula-restores yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+diff $tmpsrc/ficheriro1.txt $tmp/bacula-restores/$tmpsrc/ficheriro1.txt &>> $tmp/d1
+if [ $? != 0 ]; then
+ print_debug "ERROR: ficheriro1.txt is different"
+ rstat=1
+fi
-.bvfs_lsdir path=${tmpsrc}/ jobid=1,2,3,4,5,6,7,8
-.bvfs_lsfile path=${tmpsrc}/ jobid=1,2,3,4,5,6,7,8
+rm -rf $tmp/bacula-restores
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@$out $tmp/log4.out
+@# Try to restore a directory
+.bvfs_cleanup path=b21
+END_OF_DATA
+
+awk '/ficheriro2.txt/ {
+ print ".bvfs_restore jobid=1,2,3,4,5,6,7,8 dirid=" $1 " path=b21"
+} ' $tmp/log3.out >> $tmp/bconcmds
+
+cat <<END_OF_DATA >>${tmp}/bconcmds
+@$out $tmp/log2.out
+restore file="?b21" where=$tmp/bacula-restores yes
+wait
+messages
quit
END_OF_DATA
run_bconsole
+
+$rscripts/diff.pl -s $tmpsrc/ -d $tmp/bacula-restores/$tmpsrc/
+if [ $? != 0 ]; then
+ print_debug "ERROR: Found error while restoring a directory"
+ rstat=1
+fi
+
+diff $tmpsrc/ficheriro2.txt $tmp/bacula-restores/$tmpsrc/ficheriro2.txt
+if [ $? != 0 ]; then
+ print_debug "ERROR: ficheriro2.txt is different"
+ rstat=1
+fi
+
+rm -rf $tmp/bacula-restores
+
+cat <<END_OF_DATA >${tmp}/bconcmds
+@$out $tmp/log4.out
+@# Try to restore a hardlinks
+.bvfs_cleanup path=b21
+END_OF_DATA
+
+jidx=`awk -F '\t' '/ficheriro2.txt|hardlinks2/ { lstat=$5 }
+ END { print ".bvfs_decode_lstat lstat=\"" lstat "\""
+} ' $tmp/log3.out | $bin/bconsole -c $conf/bconsole.conf | awk -F= '/LinkFI/ { print $2 } '`
+
+awk '/ficheriro2.txt/ { if (!id) { id=$3 } } /hardlink2/ { if (!id) { id=$3 } }
+ END {
+ print ".bvfs_restore jobid=1,2,3,4,5,6,7,8 hardlink=8,'$jidx' fileid=" id " path=b21"
+ } ' $tmp/log3.out >> $tmp/bconcmds
+
+cat <<END_OF_DATA >>${tmp}/bconcmds
+@$out $tmp/log2.out
+restore file="?b21" where=$tmp/bacula-restores yes
+wait
+messages
+quit
+END_OF_DATA
+
+run_bconsole
+
+test -f $tmp/bacula-restores/$tmpsrc/hardlink2 -a -f $tmp/bacula-restores/$tmpsrc/ficheriro2.txt
+if [ $? != 0 ]; then
+ print_debug "ERROR: Need to get all files"
+ rstat=1
+fi
+
+# Compare Inode
+ls -i $tmp/bacula-restores/$tmpsrc/hardlink2 | awk ' { print $1 } ' > $tmp/1
+ls -i $tmp/bacula-restores/$tmpsrc/ficheriro2.txt | awk ' { print $1 } ' > $tmp/2
+diff $tmp/1 $tmp/2 > /dev/null
+if [ $? != 0 ]; then
+ print_debug "ERROR: Both files should have the same inode number"
+ rstat=1
+fi
+
check_for_zombie_jobs storage=File
stop_bacula
check_two_logs
-#
-# Delete .c files because we will only restored the txt files
-#
-rm -f ${tmpsrc}/*.c
-check_restore_tmp_build_diff
-${cwd}/build/src/tools/bvfs_test -T -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
- -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+grep dird_conf.c $tmp/log3.out > /dev/null
+if [ $? = 0 ]; then
+ print_debug "ERROR: Should not find dird_conf.c in bvfs_lsfiles output $tmp/log3.out"
+ estat=1
+fi
-${cwd}/build/src/tools/bvfs_test -p ${cwd}/build/ -j 1,2,3,4,5,6,7,8 \
- -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+grep dird_conf.c $tmp/log31.out > /dev/null
+if [ $? != 0 ]; then
+ print_debug "ERROR: Should find dird_conf.c in bvfs_lsfiles output $tmp/log31.out"
+ estat=1
+fi
-${cwd}/build/src/tools/bvfs_test -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
- -w "$working" -n "$db_name" -u "$db_user" -P "$db_password" \
- -f ficheriro1.txt -c ${HOST}-fd
+#
+# Delete .c files because we will only restored the txt files
+#
+#rm -f ${tmpsrc}/*.c
+#check_restore_tmp_build_diff
+#
+#${cwd}/build/src/tools/bvfs_test -T -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
+# -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+#
+#${cwd}/build/src/tools/bvfs_test -p ${cwd}/build/ -j 1,2,3,4,5,6,7,8 \
+# -w "$working" -n "$db_name" -u "$db_user" -P "$db_password"
+#
+#${cwd}/build/src/tools/bvfs_test -p ${tmpsrc} -j 1,2,3,4,5,6,7,8 \
+# -w "$working" -n "$db_name" -u "$db_user" -P "$db_password" \
+# -f ficheriro1.txt -c ${HOST}-fd
end_test
hooks/post-receive
--
Bacula Community source
|