Thread: [cedar-backup-svn] SF.net SVN: cedar-backup:[1048] cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Brought to you by:
pronovic
|
From: <pro...@us...> - 2014-10-01 02:00:08
|
Revision: 1048
http://sourceforge.net/p/cedar-backup/code/1048
Author: pronovic
Date: 2014-10-01 01:59:59 +0000 (Wed, 01 Oct 2014)
Log Message:
-----------
Debugging
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 01:44:17 UTC (rev 1047)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 01:59:59 UTC (rev 1048)
@@ -423,10 +423,10 @@
@param s3Bucket: The Amazon S3 bucket to use as the target
@return: S3 bucket URL, with no trailing slash
"""
- subdir = dailyDir.replace("/opt/backup/staging", "")
+ subdir = dailyDir.replace(stagingDir, "")
if subdir.startswith("/"):
subdir = subdir[1:]
- return "s3://%s/staging/%s" % (s3Bucket, dailyDir)
+ return "s3://%s/%s" % (s3Bucket, dailyDir)
##################################
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-01 02:08:17
|
Revision: 1049
http://sourceforge.net/p/cedar-backup/code/1049
Author: pronovic
Date: 2014-10-01 02:08:09 +0000 (Wed, 01 Oct 2014)
Log Message:
-----------
Fix warnings from 'make check'
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 01:59:59 UTC (rev 1048)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 02:08:09 UTC (rev 1049)
@@ -79,7 +79,7 @@
from CedarBackup2.util import resolveCommand, executeCommand
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString
-from CedarBackup2.actions.util import findDailyDirs, writeIndicatorFile, getBackupFiles
+from CedarBackup2.actions.util import findDailyDirs, writeIndicatorFile
########################################################################
@@ -384,7 +384,7 @@
local = LocalConfig(xmlPath=configPath)
dailyDirs = findDailyDirs(config.stage.targetDir, STORE_INDICATOR)
for dailyDir in dailyDirs:
- _storeDailyDir(dailyDir, local.amazons3.s3Bucket, config.options.backupUser, config.options.backupGroup)
+ _storeDailyDir(config.stage.targetDir, dailyDir, local.amazons3.s3Bucket)
writeIndicatorFile(dailyDir, STORE_INDICATOR, config.options.backupUser, config.options.backupGroup)
logger.info("Executed the amazons3 extended action successfully.")
@@ -397,14 +397,12 @@
# _storeDailyDir() function
############################
-def _storeDailyDir(stagingDir, dailyDir, s3Bucket, backupUser, backupGroup):
+def _storeDailyDir(stagingDir, dailyDir, s3Bucket):
"""
Store the contents of a daily staging directory to a bucket in the Amazon S3 cloud.
@param stagingDir: Configured staging directory (config.targetDir)
@param dailyDir: Daily directory to store in the cloud
@param s3Bucket: The Amazon S3 bucket to use as the target
- @param backupUser: User that target files should be owned by
- @param backupGroup: Group that target files should be owned by
"""
s3BucketUrl = _deriveS3BucketUrl(stagingDir, dailyDir, s3Bucket)
_clearExistingBackup(s3BucketUrl)
@@ -441,19 +439,20 @@
emptydir = tempfile.mkdtemp()
try:
command = resolveCommand(S3CMD_COMMAND)
- args = [ "sync", "--no-encrypt", "--recursive", "--delete-removed", emptyDir + "/", s3BucketUrl + "/", ]
+ args = [ "sync", "--no-encrypt", "--recursive", "--delete-removed", emptydir + "/", s3BucketUrl + "/", ]
result = executeCommand(command, args)[0]
if result != 0:
raise IOError("Error [%d] calling s3Cmd to clear existing backup [%s]." % (result, s3BucketUrl))
finally:
- os.rmdir(emptydir)
+ if os.path.exists(emptydir):
+ os.rmdir(emptydir)
############################
# _writeDailyDir() function
############################
-def __writeDailyDir(dailyDir, s3BucketUrl):
+def _writeDailyDir(dailyDir, s3BucketUrl):
"""
Write the daily directory out to the Amazon S3 cloud.
@param dailyDir: Daily directory to store
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-01 19:39:35
|
Revision: 1050
http://sourceforge.net/p/cedar-backup/code/1050
Author: pronovic
Date: 2014-10-01 19:39:24 +0000 (Wed, 01 Oct 2014)
Log Message:
-----------
Continued development on amazons3
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 02:08:09 UTC (rev 1049)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 19:39:24 UTC (rev 1050)
@@ -43,25 +43,28 @@
to be run immediately after the standard stage action, replacing the standard
store action. Aside from its own configuration, it requires the options and
staging configuration sections in the standard Cedar Backup configuration file.
+Since it is intended to replace the store action, it does not rely on any store
+configuration.
-This extension relies on the U{{Amazon S3Tools} <http://s3tools.org/>} package.
-It is a very thin wrapper around the C{s3cmd put} command. Before you use this
-extension, you need to set up your Amazon S3 account and configure C{s3cmd} as
-detailed in the U{{HOWTO} <http://s3tools.org/s3cmd-howto>}. The configured
-backup user will run the C{s3cmd} program, so make sure you configure S3 Tools
-as that user, and not root.
+The underlying functionality relies on the U{{Amazon S3Tools} <http://s3tools.org/>}
+package. It is a very thin wrapper around the C{s3cmd put} command. Before
+you use this extension, you need to set up your Amazon S3 account and configure
+C{s3cmd} as detailed in the U{{HOWTO} <http://s3tools.org/s3cmd-howto>}. The
+extension assumes that the backup is being executed as root, and switches over
+to the configured backup user to run the C{s3cmd} program. So, make sure you
+configure S3 Tools as the backup user and not root.
It's up to you how to configure the S3 Tools connection to Amazon, but I
-recommend that you configure GPG encrpytion using a strong passphrase. One way
-to generate a strong passphrase is using your random number generator, i.e.
-C{dd if=/dev/urandom count=20 bs=1 | xxd -ps}. (See U{{StackExchange}
+recommend that you configure GPG encryption using a strong passphrase. One way
+to generate a strong passphrase is using your system random number generator,
+i.e. C{dd if=/dev/urandom count=20 bs=1 | xxd -ps}. (See U{{StackExchange}
<http://security.stackexchange.com/questions/14867/gpg-encryption-security>}
-for more details about that advice.) If decide to use encryption, make sure you
-save off the passphrase in a safe place, so you can get at your backup data
+for more details about that advice.) If you decide to use encryption, make sure
+you save off the passphrase in a safe place, so you can get at your backup data
later if you need to.
-This extension was written for and tested on Linux. I do not expect it to
-work on non-UNIX platforms.
+This extension was written for and tested on Linux. It will throw an exception
+if run on Windows.
@author: Kenneth J. Pronovici <pro...@ie...>
"""
@@ -71,15 +74,18 @@
########################################################################
# System modules
+import sys
import os
import logging
import tempfile
+import datetime
# Cedar Backup modules
-from CedarBackup2.util import resolveCommand, executeCommand
+from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString
-from CedarBackup2.actions.util import findDailyDirs, writeIndicatorFile
+from CedarBackup2.actions.util import writeIndicatorFile
+from CedarBackup2.actions.constants import DIR_TIME_FORMAT, STAGE_INDICATOR
########################################################################
@@ -88,7 +94,9 @@
logger = logging.getLogger("CedarBackup2.log.extend.amazons3")
+SU_COMMAND = [ "su" ]
S3CMD_COMMAND = [ "s3cmd", ]
+
STORE_INDICATOR = "cback.amazons3"
@@ -101,32 +109,35 @@
"""
Class representing Amazon S3 configuration.
- Amazon S3 configuration is used for storing staging directories
- in Amazon's cloud storage using the C{s3cmd} tool.
+ Amazon S3 configuration is used for storing backup data in Amazon's S3 cloud
+ storage using the C{s3cmd} tool.
The following restrictions exist on data in this class:
- The s3Bucket value must be a non-empty string
- @sort: __init__, __repr__, __str__, __cmp__, s3Bucket
+ @sort: __init__, __repr__, __str__, __cmp__, warnMidnite, s3Bucket
"""
- def __init__(self, s3Bucket=None):
+ def __init__(self, warnMidnite=None, s3Bucket=None):
"""
Constructor for the C{AmazonS3Config} class.
@param s3Bucket: Name of the Amazon S3 bucket in which to store the data
+ @param warnMidnite: Whether to generate warnings for crossing midnite.
@raise ValueError: If one of the values is invalid.
"""
+ self._warnMidnite = None
self._s3Bucket = None
+ self.warnMidnite = warnMidnite
self.s3Bucket = s3Bucket
def __repr__(self):
"""
Official string representation for class instance.
"""
- return "AmazonS3Config(%s)" % (self.s3Bucket)
+ return "AmazonS3Config(%s, %s)" % (self.warnMidnite, self.s3Bucket)
def __str__(self):
"""
@@ -137,12 +148,16 @@
def __cmp__(self, other):
"""
Definition of equals operator for this class.
- Lists within this class are "unordered" for equality comparisons.
@param other: Other object to compare to.
@return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
"""
if other is None:
return 1
+ if self.warnMidnite != other.warnMidnite:
+ if self.warnMidnite < other.warnMidnite:
+ return -1
+ else:
+ return 1
if self.s3Bucket != other.s3Bucket:
if self.s3Bucket < other.s3Bucket:
return -1
@@ -150,6 +165,22 @@
return 1
return 0
+ def _setWarnMidnite(self, value):
+ """
+ Property target used to set the midnite warning flag.
+ No validations, but we normalize the value to C{True} or C{False}.
+ """
+ if value:
+ self._warnMidnite = True
+ else:
+ self._warnMidnite = False
+
+ def _getWarnMidnite(self):
+ """
+ Property target used to get the midnite warning flag.
+ """
+ return self._warnMidnite
+
def _setS3Bucket(self, value):
"""
Property target used to set the S3 bucket.
@@ -165,7 +196,8 @@
"""
return self._s3Bucket
- s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 Bucket")
+ warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, "Whether to generate warnings for crossing midnite.")
+ s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 Bucket in which to store data")
########################################################################
@@ -379,88 +411,165 @@
@raise IOError: If there are I/O problems reading or writing files
"""
logger.debug("Executing amazons3 extended action.")
+ if not isRunningAsRoot():
+ logger.error("Error: the amazons3 extended action must be run as root.")
+ raise ValueError("The amazons3 extended action must be run as root.")
+ if sys.platform == "win32":
+ logger.error("Error: the amazons3 extended action is not supported on Windows.")
+ raise ValueError("The amazons3 extended action is not supported on Windows.")
if config.options is None or config.stage is None:
raise ValueError("Cedar Backup configuration is not properly filled in.")
local = LocalConfig(xmlPath=configPath)
- dailyDirs = findDailyDirs(config.stage.targetDir, STORE_INDICATOR)
- for dailyDir in dailyDirs:
- _storeDailyDir(config.stage.targetDir, dailyDir, local.amazons3.s3Bucket)
- writeIndicatorFile(dailyDir, STORE_INDICATOR, config.options.backupUser, config.options.backupGroup)
+ stagingDirs = _findCorrectDailyDir(options, config, local)
+ _writeToAmazonS3(config, local, stagingDirs)
+ _writeStoreIndicator(config, stagingDirs)
logger.info("Executed the amazons3 extended action successfully.")
########################################################################
-# Utility functions
+# Private utility functions
########################################################################
-############################
-# _storeDailyDir() function
-############################
+#########################
+# _findCorrectDailyDir()
+#########################
-def _storeDailyDir(stagingDir, dailyDir, s3Bucket):
+def _findCorrectDailyDir(options, config, local):
"""
- Store the contents of a daily staging directory to a bucket in the Amazon S3 cloud.
- @param stagingDir: Configured staging directory (config.targetDir)
- @param dailyDir: Daily directory to store in the cloud
- @param s3Bucket: The Amazon S3 bucket to use as the target
+ Finds the correct daily staging directory to be written to Amazon S3.
+
+ This is substantially similar to the same function in store.py. The
+ main difference is that it doesn't rely on store configuration at all.
+
+ @param options: Options object.
+ @param config: Config object.
+ @param local: Local config object.
+
+ @return: Correct staging dir, as a dict mapping directory to date suffix.
+ @raise IOError: If the staging directory cannot be found.
"""
- s3BucketUrl = _deriveS3BucketUrl(stagingDir, dailyDir, s3Bucket)
- _clearExistingBackup(s3BucketUrl)
- _writeDailyDir(dailyDir, s3BucketUrl)
+ oneDay = datetime.timedelta(days=1)
+ today = datetime.date.today()
+ yesterday = today - oneDay
+ tomorrow = today + oneDay
+ todayDate = today.strftime(DIR_TIME_FORMAT)
+ yesterdayDate = yesterday.strftime(DIR_TIME_FORMAT)
+ tomorrowDate = tomorrow.strftime(DIR_TIME_FORMAT)
+ todayPath = os.path.join(config.stage.targetDir, todayDate)
+ yesterdayPath = os.path.join(config.stage.targetDir, yesterdayDate)
+ tomorrowPath = os.path.join(config.stage.targetDir, tomorrowDate)
+ todayStageInd = os.path.join(todayPath, STAGE_INDICATOR)
+ yesterdayStageInd = os.path.join(yesterdayPath, STAGE_INDICATOR)
+ tomorrowStageInd = os.path.join(tomorrowPath, STAGE_INDICATOR)
+ todayStoreInd = os.path.join(todayPath, STORE_INDICATOR)
+ yesterdayStoreInd = os.path.join(yesterdayPath, STORE_INDICATOR)
+ tomorrowStoreInd = os.path.join(tomorrowPath, STORE_INDICATOR)
+ if options.full:
+ if os.path.isdir(todayPath) and os.path.exists(todayStageInd):
+ logger.info("Amazon S3 process will use current day's staging directory [%s]" % todayPath)
+ return { todayPath:todayDate }
+ raise IOError("Unable to find staging directory to process (only tried today due to full option).")
+ else:
+ if os.path.isdir(todayPath) and os.path.exists(todayStageInd) and not os.path.exists(todayStoreInd):
+ logger.info("Amazon S3 process will use current day's staging directory [%s]" % todayPath)
+ return { todayPath:todayDate }
+ elif os.path.isdir(yesterdayPath) and os.path.exists(yesterdayStageInd) and not os.path.exists(yesterdayStoreInd):
+ logger.info("Amazon S3 process will use previous day's staging directory [%s]" % yesterdayPath)
+ if local.amazons3.warnMidnite:
+ logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
+ return { yesterdayPath:yesterdayDate }
+ elif os.path.isdir(tomorrowPath) and os.path.exists(tomorrowStageInd) and not os.path.exists(tomorrowStoreInd):
+ logger.info("Amazon S3 process will use next day's staging directory [%s]" % tomorrowPath)
+ if local.amazons3.warnMidnite:
+ logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
+ return { tomorrowPath:tomorrowDate }
+ raise IOError("Unable to find unused staging directory to process (tried today, yesterday, tomorrow).")
##############################
-# _deriveBucketUrl() function
+# _writeToAmazonS3() function
##############################
-def _deriveS3BucketUrl(stagingDir, dailyDir, s3Bucket):
+def _writeToAmazonS3(config, local, stagingDirs):
"""
- Derive the correct bucket URL for a daily directory.
- @param stagingDir: Configured staging directory (config.targetDir)
- @param dailyDir: Daily directory to store
- @param s3Bucket: The Amazon S3 bucket to use as the target
- @return: S3 bucket URL, with no trailing slash
+ Writes the indicated staging directories to an Amazon S3 bucket.
+
+ Each of the staging directories listed in C{stagingDirs} will be written to
+ the configured Amazon S3 bucket from local configuration. The directories
+ will be placed into the image at the root by date, so staging directory
+ C{/opt/stage/2005/02/10} will be placed into the S3 bucket at C{/2005/02/10}.
+
+ @param config: Config object.
+ @param local: Local config object.
+ @param stagingDirs: Dictionary mapping directory path to date suffix.
+
+ @raise ValueError: Under many generic error conditions
+ @raise IOError: If there is a problem writing to Amazon S3
"""
- subdir = dailyDir.replace(stagingDir, "")
- if subdir.startswith("/"):
- subdir = subdir[1:]
- return "s3://%s/%s" % (s3Bucket, dailyDir)
+ for stagingDir in stagingDirs.keys():
+ logger.debug("Storing stage directory to Amazon S3 [%s]." % stagingDir)
+ dateSuffix = stagingDirs[stagingDir]
+ s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
+ logger.debug("S3 bucket URL is [%s]" % s3BucketUrl)
+ _clearExistingBackup(config, s3BucketUrl)
+ _writeStagingDir(config, stagingDir, s3BucketUrl)
##################################
+# _writeStoreIndicator() function
+##################################
+
+def _writeStoreIndicator(config, stagingDirs):
+ """
+ Writes a store indicator file into staging directories.
+ @param config: Config object.
+ @param stagingDirs: Dictionary mapping directory path to date suffix.
+ """
+ for stagingDir in stagingDirs.keys():
+ writeIndicatorFile(stagingDir, STORE_INDICATOR,
+ config.options.backupUser,
+ config.options.backupGroup)
+
+
+##################################
# _clearExistingBackup() function
##################################
-def _clearExistingBackup(s3BucketUrl):
+def _clearExistingBackup(config, s3BucketUrl):
"""
- Clear any existing backup files for a daily directory.
- @param s3BucketUrl: S3 bucket URL derived for the daily directory
+ Clear any existing backup files for an S3 bucket URL.
+ @param config: Config object.
+ @param s3BucketUrl: S3 bucket URL derived for the staging directory
"""
- emptydir = tempfile.mkdtemp()
+ emptyDir = tempfile.mkdtemp()
try:
- command = resolveCommand(S3CMD_COMMAND)
- args = [ "sync", "--no-encrypt", "--recursive", "--delete-removed", emptydir + "/", s3BucketUrl + "/", ]
- result = executeCommand(command, args)[0]
+ suCommand = resolveCommand(SU_COMMAND)
+ s3CmdCommand = resolveCommand(S3CMD_COMMAND)
+ actualCommand = "%s sync --no-encrypt --recursive --delete-removed %s/ %s/" % (s3CmdCommand, emptyDir, s3BucketUrl)
+ result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling s3Cmd to clear existing backup [%s]." % (result, s3BucketUrl))
finally:
- if os.path.exists(emptydir):
- os.rmdir(emptydir)
+ if os.path.exists(emptyDir):
+ os.rmdir(emptyDir)
-############################
-# _writeDailyDir() function
-############################
+###########################
+# _writeStaging() function
+###########################
-def _writeDailyDir(dailyDir, s3BucketUrl):
+def _writeStagingDir(config, stagingDir, s3BucketUrl):
"""
- Write the daily directory out to the Amazon S3 cloud.
- @param dailyDir: Daily directory to store
- @param s3BucketUrl: S3 bucket URL derived for the daily directory
+ Write a staging directory out to the Amazon S3 cloud.
+ @param config: Config object.
+ @param stagingDir: Staging directory to write
+ @param s3BucketUrl: S3 bucket URL derived for the staging directory
"""
- command = resolveCommand(S3CMD_COMMAND)
- args = [ "put", "--recursive", dailyDir + "/", s3BucketUrl + "/", ]
- result = executeCommand(command, args)[0]
+ suCommand = resolveCommand(SU_COMMAND)
+ s3CmdCommand = resolveCommand(S3CMD_COMMAND)
+ actualCommand = "%s put --recursive %s/ %s/" % (s3CmdCommand, stagingDir, s3BucketUrl)
+ result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
- raise IOError("Error [%d] calling s3Cmd to store daily directory [%s]." % (result, s3BucketUrl))
+ raise IOError("Error [%d] calling s3Cmd to store staging directory [%s]." % (result, s3BucketUrl))
+
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-01 20:30:23
|
Revision: 1052
http://sourceforge.net/p/cedar-backup/code/1052
Author: pronovic
Date: 2014-10-01 20:30:20 +0000 (Wed, 01 Oct 2014)
Log Message:
-----------
Fixes from debugging on daystrom
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 20:16:04 UTC (rev 1051)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 20:30:20 UTC (rev 1052)
@@ -549,7 +549,7 @@
try:
suCommand = resolveCommand(SU_COMMAND)
s3CmdCommand = resolveCommand(S3CMD_COMMAND)
- actualCommand = "%s sync --no-encrypt --recursive --delete-removed %s/ %s/" % (s3CmdCommand, emptyDir, s3BucketUrl)
+ actualCommand = "%s sync --no-encrypt --recursive --delete-removed %s/ %s/" % (s3CmdCommand[0], emptyDir, s3BucketUrl)
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling s3Cmd to clear existing backup [%s]." % (result, s3BucketUrl))
@@ -571,7 +571,7 @@
"""
suCommand = resolveCommand(SU_COMMAND)
s3CmdCommand = resolveCommand(S3CMD_COMMAND)
- actualCommand = "%s put --recursive %s/ %s/" % (s3CmdCommand, stagingDir, s3BucketUrl)
+ actualCommand = "%s put --recursive %s/ %s/" % (s3CmdCommand[0], stagingDir, s3BucketUrl)
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling s3Cmd to store staging directory [%s]." % (result, s3BucketUrl))
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-02 01:23:56
|
Revision: 1060
http://sourceforge.net/p/cedar-backup/code/1060
Author: pronovic
Date: 2014-10-02 01:23:46 +0000 (Thu, 02 Oct 2014)
Log Message:
-----------
Use --force switch as required by s3cmd 1.5.x
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-01 22:01:11 UTC (rev 1059)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-02 01:23:46 UTC (rev 1060)
@@ -549,7 +549,7 @@
try:
suCommand = resolveCommand(SU_COMMAND)
s3CmdCommand = resolveCommand(S3CMD_COMMAND)
- actualCommand = "%s sync --no-encrypt --recursive --delete-removed %s/ %s/" % (s3CmdCommand[0], emptyDir, s3BucketUrl)
+ actualCommand = "%s sync --no-encrypt --recursive --delete-removed --force %s/ %s/" % (s3CmdCommand[0], emptyDir, s3BucketUrl)
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling s3Cmd to clear existing backup [%s]." % (result, s3BucketUrl))
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-03 00:44:22
|
Revision: 1065
http://sourceforge.net/p/cedar-backup/code/1065
Author: pronovic
Date: 2014-10-03 00:44:14 +0000 (Fri, 03 Oct 2014)
Log Message:
-----------
Continue implementing aws-cli (not tested yet)
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 00:04:21 UTC (rev 1064)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 00:44:14 UTC (rev 1065)
@@ -88,8 +88,11 @@
import logging
import tempfile
import datetime
+import json
+from string import Template
# Cedar Backup modules
+from CedarBackup2.filesystem import FilesystemList
from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString, readBoolean
@@ -231,7 +234,7 @@
warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, "Whether to generate warnings for crossing midnite.")
s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 Bucket in which to store data")
- encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt backup data before upload to S3")
+ encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt data before upload to S3")
########################################################################
@@ -555,8 +558,18 @@
s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
logger.debug("S3 bucket URL is [%s]" % s3BucketUrl)
_clearExistingBackup(config, s3BucketUrl)
- _writeStagingDir(config, stagingDir, s3BucketUrl)
- _verifyStagingDir(config, stagingDir, s3BucketUrl)
+ if local.encrypt is None:
+ _uploadStagingDir(config, stagingDir, s3BucketUrl)
+ _verifyUpload(config, stagingDir, s3BucketUrl)
+ else:
+ encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
+ try:
+ _encryptStagingDir(config, local, stagingDir, encryptedDir)
+ _uploadStagingDir(config, encryptedDir, s3BucketUrl)
+ _verifyUpload(config, stagingDir, s3BucketUrl)
+ finally:
+ if os.path.exists(encryptedDir):
+ os.rmdir(encryptedDir)
##################################
@@ -583,40 +596,88 @@
"""
Clear any existing backup files for an S3 bucket URL.
@param config: Config object.
- @param s3BucketUrl: S3 bucket URL derived for the staging directory
+ @param s3BucketUrl: S3 bucket URL associated with the staging directory
"""
suCommand = resolveCommand(SU_COMMAND)
awsCommand = resolveCommand(AWS_COMMAND)
actualCommand = "%s s3 rm --recursive %s/" % (awsCommand[0], s3BucketUrl)
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
- raise IOError("Error [%d] calling AWS CLI to clear existing backup [%s]." % (result, s3BucketUrl))
+ raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
-##############################
-# _writeStagingDir() function
-##############################
+###############################
+# _uploadStagingDir() function
+###############################
-def _writeStagingDir(config, stagingDir, s3BucketUrl):
+def _uploadStagingDir(config, stagingDir, s3BucketUrl):
"""
- Write a staging directory out to the Amazon S3 cloud.
+ Upload the contents of a staging directory out to the Amazon S3 cloud.
@param config: Config object.
- @param stagingDir: Staging directory to write
- @param s3BucketUrl: S3 bucket URL derived for the staging directory
+ @param stagingDir: Staging directory to upload
+ @param s3BucketUrl: S3 bucket URL associated with the staging directory
"""
- pass
+ suCommand = resolveCommand(SU_COMMAND)
+ awsCommand = resolveCommand(AWS_COMMAND)
+ actualCommand = "%s s3 cp --recursive %s/ %s/" % (awsCommand[0], stagingDir, s3BucketUrl)
+ result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
+ if result != 0:
+ raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
-###############################
-# _verifyStagingDir() function
-###############################
+###########################
+# _verifyUpload() function
+###########################
-def _verifyStagingDir(config, stagingDir, s3BucketUrl):
+def _verifyUpload(config, stagingDir, s3BucketUrl):
"""
- Verify that a staging directory was properly written to the Amazon S3 cloud.
+ Verify that a staging directory was properly uploaded to the Amazon S3 cloud.
@param config: Config object.
- @param stagingDir: Staging directory to write
- @param s3BucketUrl: S3 bucket URL derived for the staging directory
+ @param stagingDir: Staging directory to verify
+ @param s3BucketUrl: S3 bucket URL associated with the staging directory
"""
- pass
+ (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/")
+ suCommand = resolveCommand(SU_COMMAND)
+ awsCommand = resolveCommand(AWS_COMMAND)
+ query = "Contents[].{Key: Key, Size: Size}"
+ actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
+ (result, text) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
+ if result != 0:
+ raise IOError("Error [%d] calling AWS CLI verify upload to [%s]." % (result, s3BucketUrl))
+ contents = { }
+ for entry in json.loads(text):
+ contents[entry["Key"]] = float(entry["Size"])
+ files = FilesystemList()
+ files.addDirContents(stagingDir)
+ for entry in files:
+ key = entry.replace(config.stage.targetDir, "")
+ if not key in contents:
+ raise IOError("File was apparently not uploaded: [%s]" % entry)
+ else:
+ size = float(os.stat(entry).st_size)
+ if size != contents[key]:
+ raise IOError("File was uploaded but size differs (%f.0 vs %f.0): [%s]" % (size, contents[key], entry))
+
+################################
+# _encryptStagingDir() function
+################################
+
+def _encryptStagingDir(config, local, stagingDir, encryptedDir):
+ """
+ Encrypt a staging directory, creating a new directory in the process.
+ @param config: Config object.
+ @param stagingDir: Staging directory to use as source
+ @param encryptedDir: Target directory into which encrypted files should be written
+ """
+ suCommand = resolveCommand(SU_COMMAND)
+ files = FilesystemList()
+ files.addDirContents(stagingDir)
+ for cleartext in files:
+ encrypted = os.path.join(encryptedDir, cleartext.replace(config.stage.targetDir, ""))
+ actualCommand = Template(local.encrypt).substitute(input=cleartext, output=encrypted)
+ os.makedirs(os.path.dirname(encrypted))
+ result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
+ if result != 0:
+ raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
+
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-03 01:43:10
|
Revision: 1066
http://sourceforge.net/p/cedar-backup/code/1066
Author: pronovic
Date: 2014-10-03 01:43:04 +0000 (Fri, 03 Oct 2014)
Log Message:
-----------
Debugging on daystrom
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 00:44:14 UTC (rev 1065)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 01:43:04 UTC (rev 1066)
@@ -89,11 +89,12 @@
import tempfile
import datetime
import json
+import shutil
from string import Template
# Cedar Backup modules
from CedarBackup2.filesystem import FilesystemList
-from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot
+from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot, changeOwnership
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString, readBoolean
from CedarBackup2.actions.util import writeIndicatorFile
@@ -558,18 +559,21 @@
s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
logger.debug("S3 bucket URL is [%s]" % s3BucketUrl)
_clearExistingBackup(config, s3BucketUrl)
- if local.encrypt is None:
+ if local.amazons3.encryptCommand is None:
+ logger.debug("Encryption is disabled; files will be uploaded in cleartext.")
_uploadStagingDir(config, stagingDir, s3BucketUrl)
_verifyUpload(config, stagingDir, s3BucketUrl)
else:
+ logger.debug("Encryption is enabled; files will be uploaded after being encrypted.")
encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
+ changeOwnership(encryptedDir, config.options.backupUser, config.options.backupGroup)
try:
_encryptStagingDir(config, local, stagingDir, encryptedDir)
_uploadStagingDir(config, encryptedDir, s3BucketUrl)
- _verifyUpload(config, stagingDir, s3BucketUrl)
+ _verifyUpload(config, encryptedDir, s3BucketUrl)
finally:
if os.path.exists(encryptedDir):
- os.rmdir(encryptedDir)
+ shutil.rmtree(encryptedDir)
##################################
@@ -636,27 +640,30 @@
@param stagingDir: Staging directory to verify
@param s3BucketUrl: S3 bucket URL associated with the staging directory
"""
- (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/")
+ (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/", 1)
suCommand = resolveCommand(SU_COMMAND)
awsCommand = resolveCommand(AWS_COMMAND)
query = "Contents[].{Key: Key, Size: Size}"
actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
- (result, text) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
+ (result, data) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
if result != 0:
raise IOError("Error [%d] calling AWS CLI verify upload to [%s]." % (result, s3BucketUrl))
contents = { }
- for entry in json.loads(text):
- contents[entry["Key"]] = float(entry["Size"])
+ for entry in json.loads("".join(data)):
+ key = entry["Key"].replace(prefix, "")
+ size = long(entry["Size"])
+ contents[key] = size
files = FilesystemList()
- files.addDirContents(stagingDir)
+ files.addDirContents(stagingDir)
for entry in files:
- key = entry.replace(config.stage.targetDir, "")
- if not key in contents:
- raise IOError("File was apparently not uploaded: [%s]" % entry)
- else:
- size = float(os.stat(entry).st_size)
- if size != contents[key]:
- raise IOError("File was uploaded but size differs (%f.0 vs %f.0): [%s]" % (size, contents[key], entry))
+ if os.path.isfile(entry):
+ key = entry.replace(stagingDir, "")
+ size = long(os.stat(entry).st_size)
+ if not key in contents:
+ raise IOError("File was apparently not uploaded: [%s]" % entry)
+ else:
+ if size != contents[key]:
+ raise IOError("File size differs [%s], expected %s bytes but got %s bytes" % (entry, size, contents[key]))
################################
@@ -672,12 +679,19 @@
"""
suCommand = resolveCommand(SU_COMMAND)
files = FilesystemList()
- files.addDirContents(stagingDir)
+ files.addDirContents(stagingDir)
for cleartext in files:
- encrypted = os.path.join(encryptedDir, cleartext.replace(config.stage.targetDir, ""))
- actualCommand = Template(local.encrypt).substitute(input=cleartext, output=encrypted)
- os.makedirs(os.path.dirname(encrypted))
- result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
- if result != 0:
- raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
+ if os.path.isfile(cleartext):
+ encrypted = "%s%s" % (encryptedDir, cleartext.replace(stagingDir, ""))
+ if long(os.stat(cleartext).st_size) == 0:
+ open(encrypted, 'a').close() # don't bother encrypting empty files
+ else:
+ actualCommand = Template(local.amazons3.encryptCommand).substitute(input=cleartext, output=encrypted)
+ subdir = os.path.dirname(encrypted)
+ if not os.path.isdir(subdir):
+ os.makedirs(subdir)
+ changeOwnership(subdir, config.options.backupUser, config.options.backupGroup)
+ result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
+ if result != 0:
+ raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-03 14:41:18
|
Revision: 1070
http://sourceforge.net/p/cedar-backup/code/1070
Author: pronovic
Date: 2014-10-03 14:41:11 +0000 (Fri, 03 Oct 2014)
Log Message:
-----------
Eliminate use of deprecated string.Template
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 14:33:52 UTC (rev 1069)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 14:41:11 UTC (rev 1070)
@@ -90,7 +90,6 @@
import datetime
import json
import shutil
-from string import Template
# Cedar Backup modules
from CedarBackup2.filesystem import FilesystemList
@@ -686,7 +685,7 @@
if long(os.stat(cleartext).st_size) == 0:
open(encrypted, 'a').close() # don't bother encrypting empty files
else:
- actualCommand = Template(local.amazons3.encryptCommand).substitute(input=cleartext, output=encrypted)
+ actualCommand = local.amazons3.encryptCommand.replace("${input}", cleartext).replace("${output}", encrypted)
subdir = os.path.dirname(encrypted)
if not os.path.isdir(subdir):
os.makedirs(subdir)
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2014-10-03 16:26:49
|
Revision: 1072
http://sourceforge.net/p/cedar-backup/code/1072
Author: pronovic
Date: 2014-10-03 16:26:45 +0000 (Fri, 03 Oct 2014)
Log Message:
-----------
Add debug logging for various tasks
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 14:43:13 UTC (rev 1071)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2014-10-03 16:26:45 UTC (rev 1072)
@@ -63,7 +63,7 @@
/usr/bin/gpg -c --no-use-agent --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}
-The GPG mechanism depends on a strong passprhase for security. One way to
+The GPG mechanism depends on a strong passphrase for security. One way to
generate a strong passphrase is using your system random number generator, i.e.
C{dd if=/dev/urandom count=20 bs=1 | xxd -ps}. (See U{StackExchange
<http://security.stackexchange.com/questions/14867/gpg-encryption-security>} for
@@ -126,8 +126,8 @@
The following restrictions exist on data in this class:
- - The s3Bucket value, if set, must be a non-empty string
- - The encryptCommand valu, if set, must be a non-empty string
+ - The s3Bucket value must be a non-empty string
+ - The encryptCommand value, if set, must be a non-empty string
@sort: __init__, __repr__, __str__, __cmp__, warnMidnite, s3Bucket
"""
@@ -607,6 +607,7 @@
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
+ logger.debug("Completed clearing any existing backup in S3 for [%s]" % s3BucketUrl)
###############################
@@ -626,6 +627,7 @@
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
+ logger.debug("Completed uploading staging dir [%s] to [%s]" % (stagingDir, s3BucketUrl))
###########################
@@ -663,6 +665,7 @@
else:
if size != contents[key]:
raise IOError("File size differs [%s], expected %s bytes but got %s bytes" % (entry, size, contents[key]))
+ logger.debug("Completed verifying upload from [%s] to [%s]." % (stagingDir, s3BucketUrl))
################################
@@ -693,4 +696,5 @@
result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
if result != 0:
raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
+ logger.debug("Completed encrypting staging directory [%s] into [%s]" % (stagingDir, encryptedDir))
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
|
From: <pro...@us...> - 2015-01-05 20:43:39
|
Revision: 1097
http://sourceforge.net/p/cedar-backup/code/1097
Author: pronovic
Date: 2015-01-05 20:43:36 +0000 (Mon, 05 Jan 2015)
Log Message:
-----------
Test amazons3 changes
Modified Paths:
--------------
cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
Modified: cedar-backup2/trunk/CedarBackup2/extend/amazons3.py
===================================================================
--- cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2015-01-05 20:24:25 UTC (rev 1096)
+++ cedar-backup2/trunk/CedarBackup2/extend/amazons3.py 2015-01-05 20:43:36 UTC (rev 1097)
@@ -645,12 +645,14 @@
logger.debug("Amazon S3 size limit is: %d bytes" % limit)
contents = BackupFileList()
for stagingDir in stagingDirs:
- contents.addDir(stagingDir)
+ contents.addDirContents(stagingDir)
total = contents.totalSize()
logger.debug("Amazon S3 backup size is: %d bytes" % total)
if total > limit:
- logger.debug("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
+ logger.error("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
raise ValueError("Amazon S3 size limit exceeded: %.0f bytes > %d bytes" % (total, limit))
+ else:
+ logger.info("Total size does not exceed Amazon S3 size limit, so backup can continue.")
##############################
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|