def next(self):
"""Return next elem, add to cache. StopIteration passed upwards"""
next_elem = self.iter.next()
next_index = next_elem.index
self.cache_dict[next_index] = next_elem
self.cache_indicies.append(next_index)
if len(self.cache_indicies) > self.cache_size:
try:
del self.cache_dict[self.cache_indicies[0]]
except KeyError:
log.Log("Warning: index %s missing from iterator cache" %
(self.cache_indicies[0],), 2)
del self.cache_indicies[0]
return next_elem
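# A minimal standalone sketch of the bounded-cache pattern used by next()
# above; the class and parameter names are hypothetical. Elements are cached
# by their .index, and the oldest cached index is evicted once the cache
# exceeds cache_size.
class CachingIter(object):
    def __init__(self, iterable, cache_size=10):
        self.iter = iter(iterable)
        self.cache_dict = {}
        self.cache_indicies = []  # spelling kept consistent with the above
        self.cache_size = cache_size
    def __iter__(self):
        return self
    def next(self):
        next_elem = next(self.iter)  # StopIteration propagates to the caller
        self.cache_dict[next_elem.index] = next_elem
        self.cache_indicies.append(next_elem.index)
        if len(self.cache_indicies) > self.cache_size:
            self.cache_dict.pop(self.cache_indicies[0], None)  # tolerate misses
            del self.cache_indicies[0]
        return next_elem
    __next__ = next  # so the sketch also works as a Python 3 iterator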
def check_common_error(error_handler, function, args = []):
"""Apply function to args, if error, run error_handler on exception
This uses the catch_error predicate below to only catch
certain exceptions which seems innocent enough.
"""
try: return function(*args)
except (Exception, KeyboardInterrupt, SystemExit), exc:
TracebackArchive.add([function] + list(args))
if catch_error(exc):
log.Log.exception()
conn = Globals.backup_writer
if conn is not None: conn.statistics.record_error()
if error_handler: return error_handler(exc, *args)
else: return None
if is_routine_fatal(exc): log.Log.exception(1, 6)
else: log.Log.exception(1, 2)
raise
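# Hedged usage sketch for check_common_error(); the handler and file name are
# hypothetical. If read_file() raises one of the "innocent" exceptions matched
# by catch_error(), the handler's result is returned instead of the exception
# propagating.
def read_file(path):
    f = open(path, "rb")
    try:
        return f.read()
    finally:
        f.close()

def empty_on_error(exc, path):
    return ""  # treat an unreadable file as empty

# data = check_common_error(empty_on_error, read_file, ["/no/such/file"])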
def copy_reg_file(rpin, rpout, compress = 0):
"""Copy regular file rpin to rpout, possibly avoiding connection"""
try:
if (rpout.conn is rpin.conn and
rpout.conn is not Globals.local_connection):
v = rpout.conn.rpath.copy_reg_file(rpin.path, rpout.path, compress)
rpout.setdata()
return v
except AttributeError: pass
try:
return rpout.write_from_fileobj(rpin.open("rb"), compress = compress)
except IOError, e:
if (e.errno == errno.ERANGE):
log.Log.FatalError("'IOError - Result too large' while reading %s. "
"If you are using a Mac, this is probably "
"the result of HFS+ filesystem corruption. "
"Please exclude this file from your backup "
"before proceeding." % rpin.path)
else:
raise
def copy_attribs(rpin, rpout):
"""Change file attributes of rpout to match rpin
Only changes the chmoddable bits, uid/gid ownership, and
timestamps, so both must already exist.
"""
log.Log("Copying attributes from %s to %s" % (rpin.index, rpout.path), 7)
assert rpin.lstat() == rpout.lstat() or rpin.isspecial()
if Globals.change_ownership:
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
if Globals.eas_write: rpout.write_ea(rpin.get_ea())
if rpin.issym(): return # symlinks don't have times or perms
if (Globals.resource_forks_write and rpin.isreg() and
rpin.has_resource_fork()):
rpout.write_resource_fork(rpin.get_resource_fork())
if (Globals.carbonfile_write and rpin.isreg() and
rpin.has_carbonfile()):
rpout.write_carbonfile(rpin.get_carbonfile())
rpout.chmod(rpin.getperms())
if Globals.acls_write: rpout.write_acl(rpin.get_acl())
if not rpin.isdev(): rpout.setmtime(rpin.getmtime())
if Globals.win_acls_write: rpout.write_win_acl(rpin.get_win_acl())
def copy_attribs_inc(rpin, rpout):
"""Change file attributes of rpout to match rpin
Like above, but used to give increments the same attributes as the
    originals. Therefore, don't copy all directory ACLs and
    permissions.
"""
log.Log("Copying inc attrs from %s to %s" % (rpin.index, rpout.path), 7)
check_for_files(rpin, rpout)
    if Globals.change_ownership: rpout.chown(*rpin.getuidgid())
if Globals.eas_write: rpout.write_ea(rpin.get_ea())
if rpin.issym(): return # symlinks don't have times or perms
if (Globals.resource_forks_write and rpin.isreg() and
rpin.has_resource_fork() and rpout.isreg()):
rpout.write_resource_fork(rpin.get_resource_fork())
if (Globals.carbonfile_write and rpin.isreg() and
rpin.has_carbonfile() and rpout.isreg()):
rpout.write_carbonfile(rpin.get_carbonfile())
if rpin.isdir() and not rpout.isdir():
rpout.chmod(rpin.getperms() & 0777)
else: rpout.chmod(rpin.getperms())
if Globals.acls_write: rpout.write_acl(rpin.get_acl(), map_names = 0)
if not rpin.isdev(): rpout.setmtime(rpin.getmtime())
def chmod(self, permissions, loglevel = 2):
"""Wrapper around os.chmod"""
try:
self.conn.os.chmod(self.path, permissions & Globals.permission_mask)
except OSError, e:
if e.strerror == "Inappropriate file type or format" \
and not self.isdir():
            # Some systems throw this error when trying to set the sticky
            # bit on a non-directory. Remove the sticky bit and try again.
log.Log("Warning: Unable to set permissions of %s to %o - "
"trying again without sticky bit (%o)" % (self.path,
permissions, permissions & 06777), loglevel)
self.conn.os.chmod(self.path, permissions
& 06777 & Globals.permission_mask)
else:
raise
self.data['perms'] = permissions
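# Note on the retry above: permissions & 06777 keeps the setuid (04000),
# setgid (02000) and rwx bits while dropping only the sticky bit (01000),
# which is what some systems refuse to set on non-directories.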
def makedev(self, type, major, minor):
"""Make a special file with specified type, and major/minor nums"""
if type == 'c':
datatype = 'chr'
mode = stat.S_IFCHR | 0600
elif type == 'b':
datatype = 'blk'
mode = stat.S_IFBLK | 0600
else: raise RPathException
try: self.conn.os.mknod(self.path, mode, self.conn.os.makedev(major, minor))
except (OSError, AttributeError), e:
if isinstance(e, AttributeError) or e.errno == errno.EPERM:
# AttributeError will be raised by Python 2.2, which
# doesn't have os.mknod
log.Log("unable to mknod %s -- using touch instead" % self.path, 4)
self.touch()
self.setdata()
def write_carbonfile(self, cfile):
"""Write new carbon data to self."""
if not cfile: return
log.Log("Writing carbon data to %s" % (self.index,), 7)
from Carbon.File import FSSpec
from Carbon.File import FSRef
import Carbon.Files
import MacOS
fsobj = FSSpec(self.path)
finderinfo = fsobj.FSpGetFInfo()
finderinfo.Creator = cfile['creator']
finderinfo.Type = cfile['type']
finderinfo.Location = cfile['location']
finderinfo.Flags = cfile['flags']
fsobj.FSpSetFInfo(finderinfo)
"""Write Creation Date to self (if stored in metadata)."""
try:
cdate = cfile['createDate']
fsref = FSRef(fsobj)
cataloginfo, d1, d2, d3 = fsref.FSGetCatalogInfo(Carbon.Files.kFSCatInfoCreateDate)
cataloginfo.createDate = (0, cdate, 0)
fsref.FSSetCatalogInfo(Carbon.Files.kFSCatInfoCreateDate, cataloginfo)
self.set_carbonfile(cfile)
except KeyError: self.set_carbonfile(cfile)
def Increment(new, mirror, incpref):
"""Main file incrementing function, returns inc file created
new is the file on the active partition,
mirror is the mirrored file from the last backup,
incpref is the prefix of the increment file.
This function basically moves the information about the mirror
file to incpref.
"""
log.Log("Incrementing mirror file " + mirror.path, 5)
if ((new and new.isdir()) or mirror.isdir()) and not incpref.lstat():
incpref.mkdir()
if not mirror.lstat(): incrp = makemissing(incpref)
elif mirror.isdir(): incrp = makedir(mirror, incpref)
elif new.isreg() and mirror.isreg():
incrp = makediff(new, mirror, incpref)
else: incrp = makesnapshot(mirror, incpref)
statistics.process_increment(incrp)
return incrp
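# Dispatch summary for Increment() above, for reference:
#   mirror missing                      -> makemissing(incpref)
#   mirror is a directory               -> makedir(mirror, incpref)
#   new and mirror both regular files   -> makediff(new, mirror, incpref)
#   anything else                       -> makesnapshot(mirror, incpref)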
def filelist_globbing_get_sfs(self, filelist_fp, inc_default, list_name):
"""Return list of selection functions by reading fileobj
filelist_fp should be an open file object
inc_default is true iff this is an include list
list_name is just the name of the list, used for logging
See the man page on --[include/exclude]-globbing-filelist
"""
log.Log("Reading globbing filelist %s" % list_name, 4)
separator = Globals.null_separator and "\0" or "\n"
for line in filelist_fp.read().split(separator):
if not line: continue # skip blanks
if line[:2] == "+ ": yield self.glob_get_sf(line[2:], 1)
elif line[:2] == "- ": yield self.glob_get_sf(line[2:], 0)
else: yield self.glob_get_sf(line, inc_default)
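# Illustrative globbing-filelist contents for the parser above: "+ " forces
# inclusion, "- " forces exclusion, and a bare line follows inc_default.
# The paths are hypothetical.
sample_filelist = "\n".join([
    "+ /home/user/important",
    "- /home/user/important/cache",
    "/home/user",
])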
def compare_hash(cls, repo_iter):
"""Like above, but also compare sha1 sums of any regular files"""
def hashes_changed(src_rp, mir_rorp):
"""Return 0 if their data hashes same, 1 otherwise"""
if not mir_rorp.has_sha1():
log.Log("Warning: Metadata file has no digest for %s, "
"unable to compare." % (mir_rorp.get_indexpath(),), 2)
return 0
elif (src_rp.getsize() == mir_rorp.getsize() and
hash.compute_sha1(src_rp) == mir_rorp.get_sha1()):
return 0
return 1
src_iter = cls.get_source_select()
for src_rp, mir_rorp in rorpiter.Collate2Iters(src_iter, repo_iter):
report = get_basic_report(src_rp, mir_rorp, hashes_changed)
if report: yield report
else: log_success(src_rp, mir_rorp)
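# A minimal stand-in for hash.compute_sha1() as used above, reading a plain
# path instead of an rpath; the function name and block size are assumptions.
import hashlib

def compute_sha1_of_path(path, blocksize=65536):
    digest = hashlib.sha1()
    f = open(path, "rb")
    try:
        block = f.read(blocksize)
        while block:
            digest.update(block)
            block = f.read(blocksize)
    finally:
        f.close()
    return digest.hexdigest()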
def compare_full(cls, src_root, repo_iter):
"""Given repo iter with full data attached, return report iter"""
def error_handler(exc, src_rp, repo_rorp):
log.Log("Error reading file %s" % (src_rp.path,), 2)
return 0 # They aren't the same if we get an error
def data_changed(src_rp, repo_rorp):
"""Return 0 if full compare of data matches, 1 otherwise"""
if src_rp.getsize() != repo_rorp.getsize(): return 1
return not robust.check_common_error(error_handler,
rpath.cmp, (src_rp, repo_rorp))
for repo_rorp in repo_iter:
src_rp = src_root.new_index(repo_rorp.index)
report = get_basic_report(src_rp, repo_rorp, data_changed)
if report: yield report
else: log_success(repo_rorp)
def startBattle(self, spot, fleet, formation):
try:
data = self.conn.get('/pve/dealto/%d/%d/%d/' % (spot, fleet.id, formation), param='',
headers=self.conn.getHeader,
server=self.conn.getHeader.get('Host'))
if data == -1:
self.Log.i("Connection Error!")
return -1
if 'warReport' in data:
selfHp = data['warReport']['hpBeforeNightWarSelf']
enemyHp = data['warReport']['hpBeforeNightWarEnemy']
else:
selfHp = 0
enemyHp = 0
lastSpot = (int(data['pveLevelEnd']) == 1)
return selfHp, enemyHp, lastSpot
        except Exception:
            # any failure (connection error, missing keys) signals -1
            return -1
def __init__(self, tool_name, tool_version):
self.tool_name = tool_name
self.tool_version = tool_version
self.terminateFlingOnException = False
self.env = argparse.Namespace()
self.params = argparse.Namespace()
self.key_data = None
self.vinfo = None
self.log = Log(self.tool_name, self.tool_version)
self.log.open()
self._init_parser()
def FillInIter(rpiter, rootrp):
"""Given ordered rpiter and rootrp, fill in missing indicies with rpaths
For instance, suppose rpiter contains rpaths with indicies (),
(1,2), (2,5). Then return iter with rpaths (), (1,), (1,2), (2,),
(2,5). This is used when we need to process directories before or
after processing a file in that directory.
"""
# Handle first element as special case
first_rp = rpiter.next() # StopIteration gets passed upwards
cur_index = first_rp.index
for i in range(len(cur_index)): yield rootrp.new_index(cur_index[:i])
yield first_rp
del first_rp
old_index = cur_index
# Now do all the other elements
for rp in rpiter:
cur_index = rp.index
        if cur_index[:-1] != old_index[:-1]: # Handle special case quickly
for i in range(1, len(cur_index)): # i==0 case already handled
if cur_index[:i] != old_index[:i]:
filler_rp = rootrp.new_index(cur_index[:i])
if not filler_rp.isdir():
log.Log("Warning: expected %s to be a directory but "
"found %s instead.\nThis is probably caused "
"by a bug in versions 1.0.0 and earlier." %
(filler_rp.path, filler_rp.lstat()), 2)
filler_rp.make_zero_dir(rootrp)
yield filler_rp
yield rp
old_index = cur_index
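# Standalone illustration of the filling behaviour described in the
# FillInIter docstring, on plain tuples instead of rpaths; the function name
# is hypothetical.
def fill_in_indices(indices):
    """Yield each index preceded by any of its prefixes not yet seen"""
    old = None
    for idx in indices:
        start = 0 if old is None else 1  # () needs yielding only once
        for i in range(start, len(idx)):
            if old is None or idx[:i] != old[:i]:
                yield idx[:i]
        yield idx
        old = idx

# list(fill_in_indices([(), (1, 2), (2, 5)]))
# -> [(), (1,), (1, 2), (2,), (2, 5)]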
def __call__(self, *args):
"""Process args, where args[0] is current position in iterator
Returns true if args successfully processed, false if index is
not in the current tree and thus the final result is
available.
Also note below we set self.index after doing the necessary
start processing, in case there is a crash in the middle.
"""
index = args[0]
if self.index is None:
self.root_branch.base_index = index
if self.root_branch.can_fast_process(*args):
self.root_branch.fast_process(*args)
self.root_fast_processed = 1
else: self.root_branch.start_process(*args)
self.index = index
return 1
if index == self.index:
log.Log("Warning, repeated index %s, bad filesystem?"
% (index,), 2)
elif index < self.index:
assert 0, "Bad index order: %s >= %s" % (self.index, index)
else: # normal case
if self.finish_branches(index) is None:
return None # We are no longer in the main tree
last_branch = self.branches[-1]
if last_branch.can_fast_process(*args):
last_branch.fast_process(*args)
else:
branch = self.add_branch(index)
branch.start_process(*args)
self.index = index
return 1
def get_signature(rp, blocksize = None):
"""Take signature of rpin file and return in file object"""
if not blocksize: blocksize = find_blocksize(rp.getsize())
log.Log("Getting signature of %s with blocksize %s" %
(rp.get_indexpath(), blocksize), 7)
return librsync.SigFile(rp.open("rb"), blocksize)
def get_delta_sigfileobj(sig_fileobj, rp_new):
"""Like get_delta but signature is in a file object"""
log.Log("Getting delta of %s with signature stream" % (rp_new.path,), 7)
return librsync.DeltaFile(sig_fileobj, rp_new.open("rb"))
def get_delta_sigrp_hash(rp_signature, rp_new):
"""Like above but also calculate hash of new as close() value"""
log.Log("Getting delta (with hash) of %s with signature %s" %
(rp_new.path, rp_signature.get_indexpath()), 7)
return librsync.DeltaFile(rp_signature.open("rb"),
hash.FileWrapper(rp_new.open("rb")))
def write_delta(basis, new, delta, compress = None):
"""Write rdiff delta which brings basis to new"""
log.Log("Writing delta %s from %s -> %s" %
(basis.path, new.path, delta.path), 7)
deltafile = librsync.DeltaFile(get_signature(basis), new.open("rb"))
delta.write_from_fileobj(deltafile, compress)
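# Hedged composition of the helpers above: write_delta() is equivalent to
# taking a signature of the basis file and streaming the resulting delta into
# delta_rp. The function name is hypothetical.
def make_delta(basis_rp, new_rp, delta_rp, compress=None):
    sig_fileobj = get_signature(basis_rp)
    delta_rp.write_from_fileobj(get_delta_sigfileobj(sig_fileobj, new_rp),
                                compress)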
def clear_rp(self, rp):
        # Not sure how to interpret this;
        # I'll just clear all ACLs from rp.path
try:
sd = rp.conn.win32security. \
GetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to read ACL from %s for clearing: %s"
% (repr(rp.path), exc), 4)
return
acl = sd.GetSecurityDescriptorDacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
acl.DeleteAce(n)
sd.SetSecurityDescriptorDacl(0, acl, 0)
if ACL.flags & SACL_SECURITY_INFORMATION:
acl = sd.GetSecurityDescriptorSacl()
if acl:
n = acl.GetAceCount()
# traverse the ACL in reverse, so the indices stay correct
while n:
n -= 1
acl.DeleteAce(n)
sd.SetSecurityDescriptorSacl(0, acl, 0)
try:
rp.conn.win32security. \
SetNamedSecurityInfo(rp.path, SE_FILE_OBJECT, ACL.flags,
sd.GetSecurityDescriptorOwner(),sd.GetSecurityDescriptorGroup(),
sd.GetSecurityDescriptorDacl(),
(ACL.flags & SACL_SECURITY_INFORMATION) and
sd.GetSecurityDescriptorSacl() or None)
except (OSError, IOError, pywintypes.error), exc:
log.Log("Warning: unable to set ACL on %s after clearing: %s"
% (repr(rp.path), exc), 4)
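# The DACL and SACL clearing loops above are identical; a hypothetical helper
# that strips every ACE from an ACL object, shown for clarity only.
def _clear_aces(acl):
    n = acl.GetAceCount()
    while n:  # delete from the end so the remaining indices stay valid
        n -= 1
        acl.DeleteAce(n)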
def listrp(rp):
"""Like rp.listdir() but return [] if error, and sort results"""
def error_handler(exc):
log.Log("Error listing directory %s" % rp.path, 2)
return []
dir_listing = check_common_error(error_handler, rp.listdir)
dir_listing.sort()
return dir_listing
def add(cls, extra_args = []):
"""Add most recent exception to archived list
If extra_args are present, convert to strings and add them as
extra information to same traceback archive.
"""
cls._traceback_strings.append(log.Log.exception_to_string(extra_args))
        if len(cls._traceback_strings) > 10:
            # keep the ten most recent entries; [:10] would discard the
            # traceback that was just appended
            cls._traceback_strings = cls._traceback_strings[-10:]
def log(cls):
"""Print all exception information to log file"""
if cls._traceback_strings:
log.Log("------------ Old traceback info -----------\n%s\n"
"-------------------------------------------" %
("\n".join(cls._traceback_strings),), 3)
def set_init_quote_vals_local():
"""Set value on local connection, initialize regexps"""
global chars_to_quote, quoting_char
chars_to_quote = Globals.chars_to_quote
if len(Globals.quoting_char) != 1:
log.Log.FatalError("Expected single character for quoting char,"
"got '%s' instead" % (Globals.quoting_char,))
quoting_char = Globals.quoting_char
init_quoting_regexps()
def update_quoting(rbdir):
"""Update the quoting of a repository by renaming any
files that should be quoted differently.
"""
def requote(name):
unquoted_name = unquote(name)
quoted_name = quote(unquoted_name)
if name != quoted_name:
return quoted_name
else:
return None
def process(dirpath_rp, name, list):
new_name = requote(name)
if new_name:
if list:
list.remove(name)
list.append(new_name)
name_rp = dirpath_rp.append(name)
new_rp = dirpath_rp.append(new_name)
log.Log("Re-quoting %s to %s" % (name_rp.path, new_rp.path), 5)
rpath.move(name_rp, new_rp)
assert rbdir.conn is Globals.local_connection
mirror_rp = rbdir.get_parent_rp()
mirror = mirror_rp.path
log.Log("Re-quoting repository %s" % mirror_rp.path, 3)
try:
os_walk = os.walk
except AttributeError:
os_walk = walk
for dirpath, dirs, files in os_walk(mirror):
dirpath_rp = mirror_rp.newpath(dirpath)
for name in dirs: process(dirpath_rp, name, dirs)
for name in files: process(dirpath_rp, name, None)
def copy(rpin, rpout, compress = 0):
"""Copy RPath rpin to rpout. Works for symlinks, dirs, etc.
Returns close value of input for regular file, which can be used
to pass hashes on.
"""
log.Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
if not rpin.lstat():
if rpout.lstat(): rpout.delete()
return
if rpout.lstat():
if rpin.isreg() or not cmp(rpin, rpout):
rpout.delete() # easier to write than compare
else: return
if rpin.isreg(): return copy_reg_file(rpin, rpout, compress)
elif rpin.isdir(): rpout.mkdir()
elif rpin.issym():
# some systems support permissions for symlinks, but
# only by setting at creation via the umask
if Globals.symlink_perms: orig_umask = os.umask(0777 & ~rpin.getperms())
rpout.symlink(rpin.readlink())
if Globals.symlink_perms: os.umask(orig_umask) # restore previous umask
elif rpin.ischardev():
major, minor = rpin.getdevnums()
rpout.makedev("c", major, minor)
elif rpin.isblkdev():
major, minor = rpin.getdevnums()
rpout.makedev("b", major, minor)
elif rpin.isfifo(): rpout.mkfifo()
elif rpin.issock(): rpout.mksock()
else: raise RPathException("File %s has unknown type" % rpin.path)
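# Type dispatch summary for copy() above, for reference:
#   regular file -> copy_reg_file()    symlink           -> symlink(readlink())
#   directory    -> mkdir()            char/block device -> makedev()
#   fifo         -> mkfifo()           socket            -> mksock()
#   anything else raises RPathException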
def rename(rp_source, rp_dest):
"""Rename rp_source to rp_dest"""
assert rp_source.conn is rp_dest.conn
log.Log(lambda: "Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
if not rp_source.lstat(): rp_dest.delete()
else:
if rp_dest.lstat() and rp_source.getinode() == rp_dest.getinode() and \
rp_source.getinode() != 0:
log.Log("Warning: Attempt to rename over same inode: %s to %s"
% (rp_source.path, rp_dest.path), 2)
# You can't rename one hard linked file over another
rp_source.delete()
else:
try:
rp_source.conn.os.rename(rp_source.path, rp_dest.path)
except OSError, error:
# XXX errno.EINVAL and len(rp_dest.path) >= 260 indicates
# pathname too long on Windows
if error.errno != errno.EEXIST:
log.Log("OSError while renaming %s to %s"
% (rp_source.path, rp_dest.path), 1)
raise
# On Windows, files can't be renamed on top of an existing file
rp_source.conn.os.chmod(rp_dest.path, 0700)
rp_source.conn.os.unlink(rp_dest.path)
rp_source.conn.os.rename(rp_source.path, rp_dest.path)
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
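# Standalone sketch of the Windows rename fallback used above, on plain
# os-level paths instead of rpaths; the function name is hypothetical.
import errno, os

def rename_over(src, dest):
    try:
        os.rename(src, dest)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        # Windows refuses to rename onto an existing file: remove it first
        os.chmod(dest, 0o700)
        os.unlink(dest)
        os.rename(src, dest)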
def equal_verbose(self, other, check_index = 1,
compare_inodes = 0, compare_ownership = 0,
compare_acls = 0, compare_eas = 0, compare_win_acls = 0,
compare_size = 1, compare_type = 1, verbosity = 2):
"""Like __eq__, but log more information. Useful when testing"""
if check_index and self.index != other.index:
log.Log("Index %s != index %s" % (self.index, other.index),
verbosity)
return None
for key in self.data.keys(): # compare dicts key by key
if (key in ('uid', 'gid', 'uname', 'gname') and
(self.issym() or not compare_ownership)):
# Don't compare gid/uid for symlinks, or if told not to
pass
elif key == 'type' and not compare_type: pass
elif key == 'atime' and not Globals.preserve_atime: pass
elif key == 'ctime': pass
elif key == 'devloc' or key == 'nlink': pass
elif key == 'size' and (not self.isreg() or not compare_size): pass
elif key == 'inode' and (not self.isreg() or not compare_inodes):
pass
elif key == 'ea' and not compare_eas: pass
elif key == 'acl' and not compare_acls: pass
elif key == 'win_acl' and not compare_win_acls: pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]):
if not other.data.has_key(key):
log.Log("Second is missing key %s" % (key,), verbosity)
else: log.Log("Value of %s differs: %s vs %s" %
(key, self.data[key], other.data[key]),
verbosity)
return None
return 1
def settime(self, accesstime, modtime):
"""Change file modification times"""
log.Log("Setting time of %s to %d" % (self.path, modtime), 7)
try: self.conn.os.utime(self.path, (accesstime, modtime))
except OverflowError:
log.Log("Cannot change times of %s to %s - problem is probably"
"64->32bit conversion" %
(self.path, (accesstime, modtime)), 2)
else:
self.data['atime'] = accesstime
self.data['mtime'] = modtime