    def backup_root(self, root, absroots):
        logging.info('Backing up root %s', root)
        self.progress.what('connecting to live data %s' % root)
        self.reopen_fs(root)
        self.progress.what('scanning for files in %s' % root)
        absroot = self.fs.abspath('.')
        # If the root is a file, we can just back up the file.
        if os.path.isfile(root):
            self.just_one_file = os.path.join(absroot, os.path.split(root)[1])
        else:
            self.just_one_file = None
        self.root_metadata = self.fs.lstat(absroot)
        num_dirs = 0
        # The following is a very approximate guess, but we have no
        # way of being exact.
        dir_entry_size = 1000
        flush_threshold = obnamlib.DEFAULT_DIR_BAG_BYTES / dir_entry_size
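        # Flush the client's accumulated directory data every
        # flush_threshold directories, so unwritten state stays roughly
        # within DEFAULT_DIR_BAG_BYTES.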
        for pathname, metadata in self.find_files(absroot):
            logging.info('Backing up %s', pathname)
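            # Note whether the file is already in the new generation, so a
            # partially backed up file can be removed if backing it up
            # fails below.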
            if not self.pretend:
                existed = self.repo.file_exists(self.new_generation, pathname)
            try:
                self.maybe_simulate_error(pathname)
                if stat.S_ISDIR(metadata.st_mode):
                    self.backup_directory(pathname, metadata, absroots)
                else:
                    self.backup_non_directory(pathname, metadata)
            except (IOError, OSError) as e:
                e2 = self.translate_enverror_to_obnamerror(pathname, e)
                msg = "Can't back up %s: %s" % (pathname, str(e2))
                self.progress.error(msg, exc=e)
                if not self.pretend and not existed:
                    self.remove_partially_backed_up_file(pathname)
                if e.errno in (errno.ENOSPC, errno.EPIPE):
                    raise
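            # Count completed directories and flush the client once the
            # threshold is reached, to avoid accumulating too much
            # unwritten state in memory.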
            if metadata.isdir() and not self.pretend:
                num_dirs += 1
                if num_dirs >= flush_threshold:
                    self.repo.flush_client(self.client_name)
                    self.app.dump_memory_profile('after flushing client')
                    num_dirs = 0
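            # Make a checkpoint generation at regular intervals, so that an
            # interrupted backup does not lose all progress.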
            if self.checkpoint_manager.time_for_checkpoint():
                self.make_checkpoint()
                self.progress.what(pathname)
                num_dirs = 0
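        # Back up the directories leading up to the root as well, so the
        # full path to the root can be recreated on restore.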
        self.backup_parents('.')