def graphtxn(write=False, create=False, excl=False, on_success=None, on_failure=None):
def decorator(func):
def wrapper(self, _, uuid, *args, **kwargs):
g = self.graph(uuid, readonly=not write, create=create, excl=excl)
success = None
try:
with g.transaction(write=write) as txn:
lastID = txn.lastID
success = False
ret = func(self, g, txn, _, uuid, *args, **kwargs)
                    try:
                        # duck-type check: a generator result is streamed as-is
                        getattr(ret, 'next')
                    except AttributeError:
                        # wrap a plain result in a list; None yields nothing
                        ret = [ret] if ret is not None else []
finally:
if write:
txn.flush()
self.res.headers.set('X-lg-updates', txn.lastID - lastID)
self.res.headers.set('X-lg-maxID', txn.lastID)
for x in ret:
yield x
success = True
# delay final chunking trailer until txn has been committed
if write:
if 'x-lg-sync' in self.req.headers:
g.sync(force=True)
yield ''
except HTTPError:
raise
except (IOError, OSError) as e:
                # a full disk maps to 507 Insufficient Storage; anything else is a 500
                raise HTTPError(507 if e.errno == errno.ENOSPC else 500, str(e))
except Exception as e:
info = sys.exc_info()
                log.error('Unhandled exception: %s', ''.join(traceback.format_exception(*info)))
raise
finally:
if success is True:
if on_success:
on_success(g)
elif success is False:
if on_failure:
on_failure(g)
g.close()
return wrapper
return decorator
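A minimal usage sketch for the decorator above, assuming a handler class that supplies the graph()/req/res attributes the wrapper relies on; Handler, do_post, and the returned payload are illustrative, not part of the original source:

class Handler(object):
    @graphtxn(write=True, create=True,
              on_success=lambda g: log.info('committed %s', g.path),
              on_failure=lambda g: log.warning('rolled back %s', g.path))
    def do_post(self, g, txn, _, uuid, payload):
        # runs inside the write transaction opened by the wrapper; a single
        # return value (or a generator) is streamed back, and the wrapper
        # sets the X-lg-updates / X-lg-maxID headers before committing
        return {'uuid': uuid, 'size': len(payload)}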
def put(self, _, uuid):
with self.lock.exclusive(uuid) as locked:
(fd, name, dbname, path) = self.tmp_graph(uuid)
os.close(fd)
cleanup = [lambda: os.unlink(path)]
try:
# with self.graph(uuid, readonly=True, locked=locked) as g1, self.collection.graph(dbname, create=True, hook=False) as g2:
with self.graph(uuid, locked=locked) as g1, self.collection.graph(dbname, create=True, hook=False) as g2:
with g1.transaction(write=False) as t1, g2.transaction(write=True) as t2:
# fixme
cleanup.append(lambda: os.unlink('%s-lock' % path))
keep = self.input()
if keep is None:
keep = self.default_keep
                    if keep.get('kv', False):
self.clone_kv(t1, t2)
seeds = keep.get('seeds', None)
if seeds:
self.clone_seeds(uuid, t1, t2, seeds)
target = g1.path
cleanup.pop()() # unlink(path-lock)
try:
# fixme
os.unlink('%s-lock' % target)
except OSError:
pass
os.rename(path, target)
cleanup.pop() # unlink(path)
# bypass creds check, allow hooks to run
self.collection.remove(uuid)
# with self.collection.graph(uuid, readonly=True):
with self.collection.graph(uuid):
pass
except (IOError, OSError) as e:
            # EPERM -> 403, ENOSPC -> 507, anything else -> 404 with details
            if e.errno == errno.EPERM:
                raise HTTPError(403, str(e))
            elif e.errno == errno.ENOSPC:
                raise HTTPError(507, str(e))
raise HTTPError(404, "Reset failed for graph %s: %s" % (uuid, repr(e)))
finally:
for x in cleanup:
try:
x()
                except Exception:  # best-effort cleanup; ignore errors from temp-file removal
pass
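The cleanup-stack pattern in put() above (push an undo callable for each temporary resource, pop it once the resource is handed off, run whatever is left) also works on its own; a minimal sketch, with replace_file as a hypothetical helper and HTTPError the same class used by the handlers above:

import errno
import os
import tempfile

def replace_file(target, data):
    # write to a temp file beside the target, then atomically rename over it
    fd, path = tempfile.mkstemp(dir=os.path.dirname(target) or '.')
    cleanup = [lambda: os.unlink(path)]
    try:
        with os.fdopen(fd, 'wb') as fh:
            fh.write(data)
        os.rename(path, target)
        cleanup.pop()  # rename succeeded; nothing left to undo
    except (IOError, OSError) as e:
        if e.errno == errno.ENOSPC:
            raise HTTPError(507, str(e))
        raise
    finally:
        for undo in cleanup:
            try:
                undo()
            except Exception:
                pass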
def _add_watch_for_path(self, path):
# Must be called with _inotify_fd_lock held.
logging.debug('_add_watch_for_path(%r)', path)
for dirpath, directories, _ in itertools.chain(
[(os.path.dirname(path), [os.path.basename(path)], None)],
os.walk(path, topdown=True, followlinks=True)):
watcher_common.skip_ignored_dirs(directories)
        # TODO: this is not an ideal solution, as there are other ways for
        # symlinks to confuse our algorithm, but a general solution would be
        # very complex; this is good enough to solve the immediate problem
        # with Dart's directory structure.
watcher_common.skip_local_symlinks(
self._real_directories, dirpath, directories)
for directory in directories:
directory_path = os.path.join(dirpath, directory)
# dirpath cannot be used as the parent directory path because it is the
# empty string for symlinks :-(
parent_path = os.path.dirname(directory_path)
watch_descriptor = _libc.inotify_add_watch(
self._inotify_fd,
ctypes.create_string_buffer(directory_path),
_INTERESTING_INOTIFY_EVENTS)
if watch_descriptor < 0:
if ctypes.get_errno() == errno.ENOSPC:
logging.warning(
'There are too many directories in your application for '
'changes in all of them to be monitored. You may have to '
'restart the development server to see some changes to your '
'files.')
return
error = OSError('could not add watch for %r' % directory_path)
error.errno = ctypes.get_errno()
error.strerror = errno.errorcode[ctypes.get_errno()]
error.filename = directory_path
raise error
if parent_path in self._directory_to_subdirs:
self._directory_to_subdirs[parent_path].add(directory_path)
self._watch_to_directory[watch_descriptor] = directory_path
self._directory_to_watch_descriptor[directory_path] = watch_descriptor
self._directory_to_subdirs[directory_path] = set()
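When inotify_add_watch fails with ENOSPC, as handled above, the limit that was hit is normally the fs.inotify.max_user_watches sysctl rather than disk space; a small sketch of reading that limit so it could be added to the warning (the procfs path is the standard Linux location):

_MAX_USER_WATCHES = '/proc/sys/fs/inotify/max_user_watches'

def inotify_watch_limit():
    # return the per-user inotify watch limit, or None if it cannot be read
    try:
        with open(_MAX_USER_WATCHES) as fh:
            return int(fh.read())
    except (IOError, OSError, ValueError):
        return None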
def backup_root(self, root, absroots):
logging.info('Backing up root %s', root)
self.progress.what('connecting to live data %s' % root)
self.reopen_fs(root)
self.progress.what('scanning for files in %s' % root)
absroot = self.fs.abspath('.')
# If the root is a file, we can just back up the file.
if os.path.isfile(root):
self.just_one_file = os.path.join(absroot, os.path.split(root)[1])
else:
self.just_one_file = None
self.root_metadata = self.fs.lstat(absroot)
num_dirs = 0
# The following is a very approximate guess, but we have no
# way of being exact.
dir_entry_size = 1000
flush_threshold = obnamlib.DEFAULT_DIR_BAG_BYTES / dir_entry_size
for pathname, metadata in self.find_files(absroot):
logging.info('Backing up %s', pathname)
if not self.pretend:
existed = self.repo.file_exists(self.new_generation, pathname)
try:
self.maybe_simulate_error(pathname)
if stat.S_ISDIR(metadata.st_mode):
self.backup_directory(pathname, metadata, absroots)
else:
self.backup_non_directory(pathname, metadata)
except (IOError, OSError) as e:
e2 = self.translate_enverror_to_obnamerror(pathname, e)
msg = 'Can\'t back up %s: %s' % (pathname, str(e2))
self.progress.error(msg, exc=e)
if not existed and not self.pretend:
self.remove_partially_backed_up_file(pathname)
if e.errno in (errno.ENOSPC, errno.EPIPE):
raise
if metadata.isdir() and not self.pretend:
num_dirs += 1
if num_dirs >= flush_threshold:
self.repo.flush_client(self.client_name)
self.app.dump_memory_profile('after flushing client')
num_dirs = 0
if self.checkpoint_manager.time_for_checkpoint():
self.make_checkpoint()
self.progress.what(pathname)
num_dirs = 0
self.backup_parents('.')
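The error policy in the loop above logs and skips ordinary per-file failures but re-raises the ones that should abort the whole run (ENOSPC, EPIPE); a minimal sketch of that test factored into a helper, with is_fatal_error as a hypothetical name:

import errno

# errno values that should abort the backup rather than skip one file
_FATAL_ERRNOS = frozenset([errno.ENOSPC, errno.EPIPE])

def is_fatal_error(exc):
    return getattr(exc, 'errno', None) in _FATAL_ERRNOS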
def _policy_RequestIG(self, books_needed):
    '''Select books from IGs specified in the interleave_request attribute.
    If interleave_request_pos is present, use it as the starting point.'''
db = self.LCEobj.db
ig_req = db.get_xattr(self.shelf, self.XATTR_IG_REQ)
self.LCEobj.errno = errno.ERANGE
assert ig_req is not None, \
'RequestIG policy requires prior %s' % self.XATTR_IG_REQ
assert len(ig_req), \
'RequestIG policy requires prior %s' % self.XATTR_IG_REQ
# Get a starting position for the interleave_request list
self.LCEobj.errno = errno.ENOSPC
pos = db.get_xattr(self.shelf, self.XATTR_IG_REQ_POS)
try:
ig_pos = int(pos)
if ig_pos < 0 or ig_pos > (len(ig_req) - 1):
ig_pos = 0
    except TypeError as err:  # TSNH ("this should not happen"); see create_shelf. Legacy paranoia.
ig_pos = 0
resp = db.create_xattr(self.shelf, self.XATTR_IG_REQ_POS, ig_pos)
except ValueError as err:
ig_pos = 0
reqIGs = [ord(ig_req[i:i+1]) for i in range(0, len(ig_req), 1)]
# Determine number of books needed from each IG
igCnt = defaultdict(int)
cur = ig_pos
for cnt in range(0, books_needed):
ig = reqIGs[cur % len(reqIGs)]
igCnt[ig] += 1
cur += 1
# Allocate specified number of books from each selected IG
booksIG = {}
for ig in igCnt.keys():
booksIG[ig] = db.get_books_by_intlv_group(
igCnt[ig], (ig, ), exclude=False)
# Build list of books using request_interleave pattern
self.LCEobj.errno = errno.ENOSPC
bookList = []
cur = ig_pos
for cnt in range(0, books_needed):
ig = reqIGs[cur % len(reqIGs)]
assert len(booksIG[ig]) != 0, 'Not enough books remaining in IG'
bookList.append(booksIG[ig].pop(0))
cur += 1
# Save current position in interleave_request list
db.modify_xattr(self.shelf, self.XATTR_IG_REQ_POS, cur % len(reqIGs))
return bookList
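The round-robin arithmetic above (walk the requested interleave-group pattern from the saved position and count how many books each IG must supply) can be checked in isolation; books_per_group is a hypothetical helper, not part of the librarian code:

from collections import defaultdict

def books_per_group(pattern, books_needed, start=0):
    # e.g. books_per_group([0, 1, 2], 7) -> {0: 3, 1: 2, 2: 2}
    counts = defaultdict(int)
    for step in range(books_needed):
        counts[pattern[(start + step) % len(pattern)]] += 1
    return dict(counts)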