def __init__(self, app=None, use_native_unicode=True, session_options=None, metadata=None):
    if session_options is None:
        session_options = {}
    session_options.setdefault('scopefunc', connection_stack.__ident_func__)
    self.use_native_unicode = use_native_unicode
    self.session = self.create_scoped_session(session_options)
    self.Model = self.make_declarative_base(metadata)
    self.Query = BaseQuery
    self._engine_lock = Lock()
    self.app = app
    _include_sqlalchemy(self)

    if app is not None:
        self.init_app(app)
def __init__(self, reader, partition, discretizer, normalizer,
             batch_size, steps, shuffle):
    self.reader = reader
    self.partition = partition
    self.discretizer = discretizer
    self.normalizer = normalizer
    self.batch_size = batch_size

    if steps is None:
        # steps not given: derive it from the number of examples in the reader
        self.n_examples = reader.get_number_of_examples()
        self.steps = (self.n_examples + batch_size - 1) // batch_size
    else:
        self.n_examples = steps * batch_size
        self.steps = steps

    self.shuffle = shuffle
    # use self.steps here: the local `steps` may be None
    self.chunk_size = min(1024, self.steps) * batch_size
    self.lock = threading.Lock()
    self.generator = self._generator()
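# A minimal sketch of why the lock above is paired with the generator: when the
# batches are pulled by several worker threads, each call advances the shared
# generator under the lock so two threads never interleave inside it. The method
# below is an illustrative assumption, not necessarily the original class's code.
def __next__(self):
    with self.lock:
        return next(self.generator)

next = __next__  # Python 2 alias for the same thread-safe step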
def __init__(self, reader, discretizer, normalizer,
             batch_size, steps, shuffle):
    self.reader = reader
    self.discretizer = discretizer
    self.normalizer = normalizer
    self.batch_size = batch_size

    if steps is None:
        self.n_examples = reader.get_number_of_examples()
        self.steps = (self.n_examples + batch_size - 1) // batch_size
    else:
        self.n_examples = steps * batch_size
        self.steps = steps

    self.shuffle = shuffle
    self.chunk_size = min(1024, self.steps) * batch_size  # again self.steps: steps may be None
    self.lock = threading.Lock()
    self.generator = self._generator()
def __init__(self, input_transcoder, output_transcoder, command=None, cwd=None, env=None):
    self.master = None
    self.slave = None
    self.process = None
    self.input_transcoder = input_transcoder
    self.output_transcoder = output_transcoder
    self.command = command
    self.cwd = cwd
    self.env = env
    self.mutex = Lock()
    self.read_thread = None
    self.write_thread = None
    self.stop = False
def __init__(self, split='train', folder=None, mode='train', num_replica=1, subtract_mean=True):
    """
    Mode: 'train', 'valid', or 'test'.
    Train: random scale, random crop.
    Valid: single center crop.
    Test: 10-crop testing (not implemented yet).
    """
    super(ImageNetDataProvider, self).__init__()
    self.log = tfplus.utils.logger.get()
    self._split = split
    self._folder = folder
    self._img_ids = None
    self._labels = None
    self._mode = mode
    self._rnd_proc = ImagePreprocessor(
        rnd_hflip=True, rnd_colour=False, rnd_resize=[256, 256], resize=256,
        crop=224)
    self._mean_img = np.array(
        [103.062623801, 115.902882574, 123.151630838], dtype='float32')
    self._mutex = threading.Lock()
    self.register_option('imagenet:dataset_folder')
    self._num_replica = num_replica
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
             strict_slashes=True, redirect_defaults=True,
             converters=None, sort_parameters=False, sort_key=None,
             encoding_errors='replace', host_matching=False):
    self._rules = []
    self._rules_by_endpoint = {}
    self._remap = True
    self._remap_lock = Lock()

    self.default_subdomain = default_subdomain
    self.charset = charset
    self.encoding_errors = encoding_errors
    self.strict_slashes = strict_slashes
    self.redirect_defaults = redirect_defaults
    self.host_matching = host_matching

    self.converters = self.default_converters.copy()
    if converters:
        self.converters.update(converters)

    self.sort_parameters = sort_parameters
    self.sort_key = sort_key

    for rulefactory in rules or ():
        self.add(rulefactory)
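# A minimal sketch of the double-checked pattern a remap lock like _remap_lock
# serves: lazily re-sort the rules, with a second check under the lock so
# concurrent callers don't repeat the work. Illustrative; not necessarily
# Werkzeug's exact implementation.
def update(self):
    if not self._remap:
        return
    with self._remap_lock:
        if not self._remap:
            return
        self._rules.sort(key=lambda rule: rule.match_compare_key())
        for rules in self._rules_by_endpoint.values():
            rules.sort(key=lambda rule: rule.build_compare_key())
        self._remap = False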
def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.

    This will create the file if necessary.
    """
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly

    self._create_file_if_needed()

    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    #   ((key, value), (key, value)...) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None
def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
        self._read_only = True
        if self._warn_on_readonly:
            logger.warn('The credentials file (%s) is not writable. Opening in '
                        'read-only mode. Any refreshed credentials will only be '
                        'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
        logger.debug('Initializing empty multistore file')
        # The multistore is empty so write out an empty file.
        self._data = {}
        self._write()
    elif not self._read_only or self._data is None:
        # Only refresh the data if we are read/write or we haven't
        # cached the data yet. If we are read-only, we assume it isn't
        # changing out from under us and that we only have to read it
        # once. This prevents us from whacking any new access keys that
        # we have cached in memory but were unable to write out.
        self._refresh_data_cache()
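# For symmetry, a minimal sketch of the matching unlock step, assuming it simply
# reverses _lock: release the file lock first, then the thread lock. Illustrative
# only; not necessarily the library's exact code.
def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()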
def init_static_locks(cls):
    with cls._lock_init_lock:
        cls._ensure_ffi_initialized()

        if not cls._lock_cb_handle:
            wrapper = ffi_callback(
                "void(int, int, const char *, int)",
                name="Cryptography_locking_cb",
            )
            cls._lock_cb_handle = wrapper(cls._lock_cb)

        # Use Python's implementation if available; importing _ssl
        # triggers the setup for this.
        __import__("_ssl")

        if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
            return

        # If nothing else has set up a locking callback already, we set
        # up our own.
        num_locks = cls.lib.CRYPTO_num_locks()
        cls._locks = [threading.Lock() for n in range(num_locks)]

        cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
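# For context, a minimal sketch of the _lock_cb callback referenced above:
# OpenSSL hands the callback a mode flag and a lock index, and the callback
# acquires or releases the matching Python Lock. Illustrative assumption, not
# the library's exact implementation.
def _lock_cb(cls, mode, n, file, line):
    lock = cls._locks[n]
    if mode & cls.lib.CRYPTO_LOCK:
        lock.acquire()
    else:
        lock.release()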
def initialize(self, impl, time_func=None, **kwargs):
    super(PollIOLoop, self).initialize(**kwargs)
    self._impl = impl
    if hasattr(self._impl, 'fileno'):
        set_close_exec(self._impl.fileno())
    self.time_func = time_func or time.time
    self._handlers = {}
    self._events = {}
    self._callbacks = []
    self._callback_lock = threading.Lock()
    self._timeouts = []
    self._cancellations = 0
    self._running = False
    self._stopped = False
    self._closing = False
    self._thread_ident = None
    self._blocking_signal_threshold = None
    self._timeout_counter = itertools.count()

    # Create a pipe that we send bogus data to when we want to wake
    # the I/O loop when it is idle
    self._waker = Waker()
    self.add_handler(self._waker.fileno(),
                     lambda fd, events: self._waker.consume(),
                     self.READ)
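# A minimal sketch of how a callback lock like _callback_lock is typically used:
# callbacks scheduled from other threads are appended under the lock, and the
# waker pipe is poked so an idle loop notices them. Illustrative only; not
# Tornado's exact implementation.
def add_callback(self, callback):
    with self._callback_lock:
        was_empty = not self._callbacks
        self._callbacks.append(callback)
    if was_empty and threading.current_thread().ident != self._thread_ident:
        self._waker.wake()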
def __init__(self, computation, host='', port=8181, poll_sec=10, DocumentRoot=None,
             keyfile=None, certfile=None, show_task_args=True):
    self._lock = threading.Lock()
    if not DocumentRoot:
        DocumentRoot = os.path.join(os.path.dirname(__file__), 'data')
    self._nodes = {}
    self._updates = {}
    if poll_sec < 1:
        pycos.logger.warning('invalid poll_sec value %s; it must be at least 1', poll_sec)
        poll_sec = 1
    self._poll_sec = poll_sec
    self._show_args = bool(show_task_args)

    self._server = BaseHTTPServer.HTTPServer((host, port), lambda *args:
                                             HTTPServer._HTTPRequestHandler(self, DocumentRoot, *args))
    if certfile:
        self._server.socket = ssl.wrap_socket(self._server.socket, keyfile=keyfile,
                                              certfile=certfile, server_side=True)
    self._httpd_thread = threading.Thread(target=self._server.serve_forever)
    self._httpd_thread.daemon = True
    self._httpd_thread.start()

    self.computation = computation
    self.status_task = pycos.Task(self.status_proc)
    if computation.status_task:
        client_task = computation.status_task

        def chain_msgs(task=None):
            task.set_daemon()
            while 1:
                msg = yield task.receive()
                self.status_task.send(msg)
                client_task.send(msg)

        computation.status_task = pycos.Task(chain_msgs)
    else:
        computation.status_task = self.status_task
    pycos.logger.info('Started HTTP%s server at %s',
                      's' if certfile else '', str(self._server.socket.getsockname()))
def test_request_vif(self, m_to_vif):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(mac_address=container_mac,
                                         ip_address=container_ip)

    vif = mock.Mock()
    port_request = mock.sentinel.port_request
    vm_port = self._get_fake_port()

    m_to_vif.return_value = vif
    m_driver._get_port_request.return_value = port_request
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())
    neutron.create_port.return_value = container_port

    self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                          subnets, security_groups))

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups)
    neutron.create_port.assert_called_once_with(port_request)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._add_to_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    m_to_vif.assert_called_once_with(container_port['port'], subnets)
def test_request_vif_parent_not_found(self, m_to_vif):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(mac_address=container_mac,
                                         ip_address=container_ip)

    port_request = mock.sentinel.port_request
    m_driver._get_port_request.return_value = port_request
    m_driver.lock = mock.MagicMock(spec=threading.Lock())
    neutron.create_port.return_value = container_port
    m_driver._get_parent_port.side_effect = n_exc.NeutronClientException

    self.assertRaises(n_exc.NeutronClientException, cls.request_vif,
                      m_driver, pod, project_id, subnets, security_groups)

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups)
    neutron.create_port.assert_called_once_with(port_request)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._add_to_allowed_address_pairs.assert_not_called()
    m_to_vif.assert_not_called()
def test_release_vif(self):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    port_id = lib_utils.get_hash()
    pod = mock.sentinel.pod
    vif = mock.Mock()
    vif.id = port_id

    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(port_id, container_ip,
                                         container_mac)
    neutron.show_port.return_value = container_port

    vm_port = self._get_fake_port()
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())

    cls.release_vif(m_driver, pod, vif)

    neutron.show_port.assert_called_once_with(port_id)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._remove_from_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    neutron.delete_port.assert_called_once_with(vif.id)
def test_release_vif_delete_failed(self):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    port_id = lib_utils.get_hash()
    pod = mock.sentinel.pod
    vif = mock.Mock()
    vif.id = port_id

    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_addresses
    container_port = self._get_fake_port(port_id, container_ip,
                                         container_mac)
    neutron.show_port.return_value = container_port
    neutron.delete_port.side_effect = n_exc.PortNotFoundClient

    vm_port = self._get_fake_port()
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())

    cls.release_vif(m_driver, pod, vif)

    neutron.show_port.assert_called_once_with(port_id)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._remove_from_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    neutron.delete_port.assert_called_once_with(vif.id)
def __init__(self, path, password=None, server_id=None, disable_auto_login=True, timeout=10):
    self.timeout = timeout
    self.server_id = server_id
    self.password = password
    self.disable_auto_login = disable_auto_login
    if disable_auto_login:
        self.client = toxclient.Toxclient(path)
    else:
        self.client = toxclient.Toxclient(path, password)
    self.exec_lock = threading.Lock()
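# A minimal sketch of how an exec_lock like the one above is commonly used:
# serialize every command sent through the client so concurrent callers don't
# interleave on the wire. Both the method name and the client call below are
# hypothetical illustrations, not the original project's API.
def _execute(self, *args):
    with self.exec_lock:                          # one command in flight at a time
        return self.client.raw_request(*args)     # hypothetical client call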
def startTh():
    q1 = Queue.Queue(10)
    ql1 = threading.Lock()
    collect = porterThread('collect', q1, ql1, interval=3)
    collect.start()
    time.sleep(0.5)
    sendjson = porterThread('sendjson', q1, ql1, interval=3)
    sendjson.start()
    # print "start"
    collect.join()
    sendjson.join()
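# For context, a minimal sketch of a worker in the style of porterThread above:
# a Thread subclass that either fills or drains the shared queue, taking the
# shared lock around each queue access. The class body is an illustrative
# assumption, not the original porterThread implementation.
import threading
import time
try:
    import Queue            # Python 2, matching the snippet above
except ImportError:
    import queue as Queue   # Python 3

class PorterThreadSketch(threading.Thread):
    def __init__(self, name, queue, lock, interval=3):
        super(PorterThreadSketch, self).__init__(name=name)
        self.queue = queue
        self.lock = lock
        self.interval = interval

    def run(self):
        for _ in range(3):          # bounded so the sketch terminates
            with self.lock:         # make the check-then-get/put step atomic
                if self.name == 'collect':
                    self.queue.put({'ts': time.time()})
                elif not self.queue.empty():
                    print(self.queue.get())
            time.sleep(self.interval)
# Note: Queue.Queue is itself thread-safe; the extra lock only matters when the
# empty-check and the get must happen atomically, as sketched here.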
def __init__(self, host, port):
    self.buf = deque()
    self.handle = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        # self.handle.connect((host, port))
        self.handle.bind(("", port))
    except socket.error as msg:   # 'as' form works on Python 2.6+ and 3
        pass
    super(UDPDriver, self).__init__(self.handle)
    self._write_lock = threading.Lock()
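# A minimal sketch of how _write_lock and buf are typically used together in a
# driver like this: writers append under the lock, and a flush drains the buffer
# while holding it so a datagram is never split across threads. The method names
# are illustrative assumptions, not the original driver's API.
def write(self, data):
    with self._write_lock:
        self.buf.append(data)

def flush(self, addr):
    with self._write_lock:
        while self.buf:
            self.handle.sendto(self.buf.popleft(), addr)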