def test_release_vif_parent_not_found(self):
cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
port_id = lib_utils.get_hash()
pod = mock.sentinel.pod
vif = mock.Mock()
vif.id = port_id
container_mac = mock.sentinel.mac_address
container_ip = mock.sentinel.ip_address
container_port = self._get_fake_port(port_id, container_ip,
container_mac)
neutron.show_port.return_value = container_port
m_driver.lock = mock.MagicMock(spec=threading.Lock())
m_driver._get_parent_port.side_effect = n_exc.NeutronClientException
self.assertRaises(n_exc.NeutronClientException, cls.release_vif,
m_driver, pod, vif)
neutron.show_port.assert_called_once_with(port_id)
m_driver._get_parent_port.assert_called_once_with(neutron, pod)
m_driver._remove_from_allowed_address_pairs.assert_not_called()
neutron.delete_port.assert_not_called()
Example source code for the Python threading.Lock() class
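Before the project snippets, here is a minimal, self-contained sketch of the pattern they all share: one threading.Lock per object, acquired around every access to shared mutable state. The Counter class and its method names are illustrative only and do not come from any project quoted below.
# Minimal sketch of the shared pattern: a single Lock guarding mutable state.
# Counter/increment are illustrative names, not taken from the snippets below.
import threading

class Counter(object):
    def __init__(self):
        self._value = 0
        self._lock = threading.Lock()

    def increment(self):
        # "with" acquires the lock and releases it even if the body raises.
        with self._lock:
            self._value += 1

    @property
    def value(self):
        with self._lock:
            return self._value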
def __init__(self, protocol, serverAddr, deviceId, deviceKey, deviceHandler):
self.serverAddr = serverAddr
self.protocol = protocol
self.deviceId = deviceId
self.deviceKey = deviceKey
self.lock = threading.Lock()
if self.protocol == "udp":
self.udpHeartbeatSeconds = 2
self.udpDataPacketInterval = 3
self.heartbeatCounter = 0
self.stateFile = "client.dat"
elif self.protocol == "ssl":
self.caCertFile = "servercert.pem"
self.deviceCertFile = "devicecert.pem"
self.deviceKeyFile = "devicekey.pem"
self.sslIntervalSeconds = 6
self.deviceHandler = deviceHandler
self.deviceHandler.service = self
def __init__(self, config):
self.service = None
self.webServer = None
self.config = config
self.httpsPort = int(self.config.get('web', 'httpsPort'))
self.httpPort = int(self.config.get('web', 'httpPort'))
self.adminPasswordHash = self.config.get('web', 'adminPasswordHash')
self.apiSecret = self.config.get('web', 'apiSecret')
self.uploadDir = self.config.get('web', 'uploadDir')
self.dbFile = self.config.get('web', 'dbFile')
self.httpsCertFile = self.config.get('web', 'httpsCertFile')
self.httpsKeyFile = self.config.get('web', 'httpsKeyFile')
self.httpsChainFile = self.config.get('web', 'httpsChainFile')
self.localVideoPort = int(self.config.get('web', 'localVideoPort'))
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
self.database = database.Database(self.dbFile)
self.deviceConfig = dict()
for deviceId, jsonConf in dict(self.config.items('devices')).iteritems():
self.deviceConfig[deviceId] = json.loads(jsonConf, object_pairs_hook=OrderedDict)
self.trends = dict()
self.lock = threading.Lock()
def __init__(self, parent):
self.parent = parent
# Initialize variables for input data processing
self.data_queue = Queue.Queue()
self.empty_queue = False
# variables for thread management
self.is_running = True
self.timeout_check_period = 0.1 # this is in seconds
self.process_thread_released = False
# create mutex locks for handling issues with Reset
self.reset_lock = threading.Lock()
self.reset_signal = threading.Event()
# create and start the main thread
self.process_thread = threading.Thread(target=self.Process)
self.process_thread.start()
def __init__(self, methodName='runTest', orbArgs=[]):
unittest.TestCase.__init__(self, methodName)
args = sys.argv
self.debuglevel = 3
for arg in args:
if '--debuglevel' in arg:
self.debuglevel = arg.split('=')[-1]
self._orb = CORBA.ORB_init(sys.argv + orbArgs, CORBA.ORB_ID)
self._poa = self._orb.resolve_initial_references("RootPOA")
self._poa._get_the_POAManager().activate()
self._ns = self._orb.resolve_initial_references("NameService")
self._root = self._ns._narrow(CosNaming.NamingContext)
# Maintain a registry of the DomainManager (there should normally be just one)
# and all spawned DeviceManagers, for easy cleanup.
self._domainBooter = None
self._domainManager = None
self._deviceLock = threading.Lock()
self._deviceBooters = []
self._deviceManagers = []
self._execparams = ""
def __init__(self, porttype):
"""
Instantiates a new object and generates a default StreamSRI. The
porttype parameter corresponds to the type of data contained in the
array of data being sent.
The porttype is also used in the connectPort() method to narrow the
connection
"""
self.port_type = porttype
self.outPorts = {}
self.refreshSRI = False
self.sri=bulkio_helpers.defaultSRI
self.port_lock = threading.Lock()
self.done = False
def __init__(self, porttype):
"""
Instantiates a new object responsible for writing data from the port
into an array.
It is important to notice that the porttype is a BULKIO__POA type and
not a BULKIO type. This is because it is used to generate a
Port class that will be returned when getPort() is invoked. The
returned class is the one acting as a server and therefore must be a
Portable Object Adapter rather than a simple BULKIO object.
Inputs:
<porttype> The BULKIO__POA data type
"""
StreamMgr.__init__(self)
self.port_type = porttype
self.sri=bulkio_helpers.defaultSRI
self.data = []
self.timestamps = []
self.gotEOS = False
self.breakBlock = False
self.port_lock = threading.Lock()
self.port_cond = threading.Condition(self.port_lock)
def __init__(self, porttype):
"""
Instantiates a new object responsible for writing data from the port
into an array.
It is important to notice that the porttype is a BULKIO__POA type and
not a BULKIO type. This is because it is used to generate a
Port class that will be returned when getPort() is invoked. The
returned class is the one acting as a server and therefore must be a
Portable Object Adapter rather than a simple BULKIO object.
Inputs:
<porttype> The BULKIO__POA data type
"""
self.port_type = porttype
self.sri=bulkio_helpers.defaultSRI
self.data = []
self.gotEOS = False
self.port_lock = threading.Lock()
self.valid_streams = {}
self.invalid_streams = {}
self.received_data = {}
def __init__(self, porttype, throttle=False):
"""
Instantiates a new object and generates a default StreamSRI. The
porttype parameter corresponds to the type of data contained in the
array of data being sent.
The porttype is also used in the connectPort() method to narrow the
connection
"""
self.porttype = porttype
self.outPorts = {}
self.refreshSRI = False
self.defaultStreamSRI = BULKIO.StreamSRI(1, 0.0, 0.001, 1, 0, 0.0,
0.001, 1, 0, "sampleStream",
True, [])
self.port_lock = threading.Lock()
self._throttle=throttle
self.done = False
def __init__(self, resource=None ):
self._mgr_lock = threading.Lock()
self._ecm = None
self._logger = logging.getLogger("ossie.events.Manager")
self._logger.setLevel(logging.INFO)
self._allow = True
self._registrations=[]
if resource :
try:
self._logger.debug("Requesting Domain Manager Access....")
dom = resource.getDomainManager()
self._logger.debug("Requesting EventChannelManager Access....")
self._ecm = dom.getRef()._get_eventChannelMgr()
self._logger.debug("Acquired reference to EventChannelManager")
except:
#print traceback.format_exc()
self._logger.warn("EventChannelManager - unable to resolve DomainManager's EventChannelManager ")
pass
def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=sri.compare, timeCmp=timestamp.compare, PortType = _TYPE_, newSriCallback=None, sriChangeCallback=None,interface=None):
self.name = name
self.logger = logger
self.port_lock = threading.Lock()
self.sri_query_lock = threading.Lock()
self._attachedStreams = {} # key=attach_id, value = (streamDef, userid)
self.stats = InStats(name, PortType )
self.sriDict = {} # key=streamID, value=(StreamSRI, PrecisionUTCTime)
self.attachDetachCallback = attachDetachCallback
self.newSriCallback = newSriCallback
self.sriChangeCallback = sriChangeCallback
self.sri_cmp = sriCmp
self.time_cmp = timeCmp
self.sriChanged = False
if not interface:
if self.logger:
self.logger.error("InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49")
raise Port.InvalidPort(1, "InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49")
self.interface=interface # BULKIO port interface (valid options are BULKIO.dataSDDS or BULKIO.dataVITA49)
self.setNewAttachDetachListener(attachDetachCallback)
if self.logger:
self.logger.debug("bulkio::InAttachablePort CTOR port:" + str(self.name) + " using interface " + str(self.interface))
def __init__(self, code=None, namespace=None, process_target=None, use_caching=False):
"""
Create a new signal.
"""
if not process_target:
process_target = self.process
self.process_target = process_target
self.receivers = list()
self.self_refs = dict()
self.lock = threading.Lock()
if code:
self.code = code
else:
self.code = self.Meta.code
if namespace:
self.namespace = namespace
else:
self.namespace = self.Meta.namespace
self.use_caching = use_caching
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
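The sender_receivers_cache above relies on weakref.WeakKeyDictionary so that cached receiver lists disappear automatically once their sender is garbage-collected. Below is a hedged, standalone sketch of that caching idea combined with the same kind of lock; ReceiverCache and its compute callback are hypothetical names, not part of the signal API shown above.
# Hedged sketch of a per-sender cache whose entries vanish with their sender.
# ReceiverCache and compute() are illustrative, not the original signal API.
import threading
import weakref

class ReceiverCache(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._cache = weakref.WeakKeyDictionary()

    def get(self, sender, compute):
        # Memoize compute(sender) per sender object; if two threads race,
        # the last write wins, which is harmless for a pure cache.
        with self._lock:
            if sender in self._cache:
                return self._cache[sender]
        result = compute(sender)
        with self._lock:
            self._cache[sender] = result
            return result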
def __init__(self, instance):
"""
Initiate registry with pre-loaded apps.
:param instance: Instance of the controller.
:type instance: pyplanet.core.instance.Instance
"""
self.instance = instance
self.apps = OrderedDict()
self.unloaded_apps = OrderedDict()
# Set ready states.
self.apps_ready = self.ready = False
# Set a lock for threading.
self._lock = threading.Lock()
# Listen to events
self.instance.signals.listen('contrib.mode:script_mode_changed', self._on_mode_change)
def new_build_thread(try_build):
import threading
for sub_pkg in list(try_build):
dumb_mutex = threading.Lock()
dumb_mutex.acquire()
try:
sub_thread = threading.Thread(
target=slave_thread_build, args=[sub_pkg])
sub_thread.start()
sub_thread.join()
dumb_mutex.release()
return 0
except:
err_msg(
'Sub-build process using thread {}, building \033[36m{}\033[0m \033[93mfailed!\033[0m'.format(sub_thread.name, sub_pkg))
return 128
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = _threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = _threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = _threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = _threading.Condition(self.mutex)
self.unfinished_tasks = 0
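The comments in the constructor above describe the key idiom: a single mutex shared by every Condition, so each wait/notify is serialized with every put/get. The following toy bounded buffer illustrates the same idea; it is a simplified sketch, not a replacement for the Queue class above.
# Simplified illustration of the one-mutex-many-conditions idiom above.
# This is a toy bounded buffer, not a drop-in replacement for Queue.
import collections
import threading

class BoundedBuffer(object):
    def __init__(self, maxsize):
        self._items = collections.deque()
        self._maxsize = maxsize
        self._mutex = threading.Lock()
        self._not_empty = threading.Condition(self._mutex)
        self._not_full = threading.Condition(self._mutex)

    def put(self, item):
        with self._not_full:                 # acquires the shared mutex
            while len(self._items) >= self._maxsize:
                self._not_full.wait()
            self._items.append(item)
            self._not_empty.notify()         # wake one blocked get()

    def get(self):
        with self._not_empty:                # same mutex as put()
            while not self._items:
                self._not_empty.wait()
            item = self._items.popleft()
            self._not_full.notify()          # wake one blocked put()
            return item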
def __init__(self, **kwargs):
self.agent_started = False
self.agent_destroyed = False
self.profiler_lock = threading.Lock()
self.main_thread_func = None
self.run_ts = None
self.run_id = None
self.config = Config(self)
self.config_loader = ConfigLoader(self)
self.message_queue = MessageQueue(self)
self.frame_cache = FrameCache(self)
self.process_reporter = ProcessReporter(self)
self.cpu_reporter = CPUReporter(self)
self.allocation_reporter = AllocationReporter(self)
self.block_reporter = BlockReporter(self)
self.error_reporter = ErrorReporter(self)
self.options = None
def simulate_lock():
lock = threading.Lock()
def lock_wait():
lock.acquire()
lock.release()
while True:
lock.acquire()
t = threading.Thread(target=lock_wait)
t.start()
time.sleep(1)
lock.release()
time.sleep(1)
def __init__(self, apply_light_policy_interval = 10, device_detection_interval = 10, device_offline_delay = 10, logging_level = logging.INFO):
self.__yeelight_detection_thread = None
self.__device_detection_thread = None
self.__device_detection_thread_woker = {}
self.__device_detection_thread_rlock = threading.Lock()
self.__thread_rlock = threading.Lock()
self.__apply_light_policy_thread = None
self.__current_geo = None
self.__compiled_policy = []
self.__compiled_policy_date = None
self.__device_on_monitor = []
self.__device_online = []
self.__device_detection_interval = device_detection_interval
self.__apply_light_policy_interval = apply_light_policy_interval
self.__device_offline_delay = device_offline_delay
self.__config = {}
self.__RUNNING = False
# a few setups
self.register_signal_handler()
self.__setup_log(logging_level = logging_level)
self.__logger.info("Controller instance created")
def __init__(self, args, retries=3, timeout=None):
"""Construct an instance.
Arguments:
args - Array of program arguments
retries - Number of times to try restarting the program before
giving up.
"""
self.args = args
self.retries = retries
self.timeout = timeout
self.lock = threading.Lock()
self.program = None
self.emitter = None
self.parser = None
self.__establish()
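The docstring above explains that retries bounds how many times the wrapped program is restarted before giving up, while self.lock serializes access. A hedged sketch of that retry-under-a-lock idea follows; start_program is a hypothetical stand-in for whatever __establish() actually launches.
# Hypothetical sketch of the lock-plus-retries idea; start_program is a
# placeholder callable, not part of the original class.
import threading

def call_with_restart(start_program, retries=3, lock=None):
    lock = lock or threading.Lock()
    with lock:                       # only one thread restarts at a time
        for attempt in range(1, retries + 1):
            try:
                return start_program()
            except OSError:          # e.g. the program failed to launch
                if attempt == retries:
                    raise            # give up after the last attempt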
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._file = LockedFile(filename, 'r+', 'r')
self._thread_lock = threading.Lock()
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# ((key, value), (key, value)...) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
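The comment above describes a deserialized cache (_data) that is only valid once the store has been locked or refreshed, with None meaning the backing file has not been read yet. A small sketch of that lazily filled, lock-guarded cache follows; CachedStore and load_from_disk are illustrative names, not the real implementation.
# Hedged sketch of a lock-guarded, lazily filled cache in the spirit of
# the _data attribute above. load_from_disk is a stand-in, not the parser.
import threading

class CachedStore(object):
    def __init__(self, load_from_disk):
        self._load = load_from_disk
        self._lock = threading.Lock()
        self._data = None            # None means "not read yet"

    def get_data(self):
        with self._lock:
            if self._data is None:
                self._data = self._load()
            return self._data

    def invalidate(self):
        with self._lock:
            self._data = None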
def __init__(self, topology_settings):
self._settings = topology_settings
topology_description = TopologyDescription(
topology_settings.get_topology_type(),
topology_settings.get_server_descriptions(),
topology_settings.replica_set_name,
None,
None)
self._description = topology_description
# Store the seed list to help diagnose errors in _error_message().
self._seed_addresses = list(topology_description.server_descriptions())
self._opened = False
self._lock = threading.Lock()
self._condition = self._settings.condition_class(self._lock)
self._servers = {}
self._pid = None
def test_no_req_ids(self, *args):
in_flight = 3
get_holders = self.make_get_holders(1)
max_connection = Mock(spec=Connection, host='localhost',
lock=Lock(),
max_request_id=in_flight - 1, in_flight=in_flight,
is_idle=True, is_defunct=False, is_closed=False)
holder = get_holders.return_value[0]
holder.get_connections.return_value.append(max_connection)
self.run_heartbeat(get_holders)
holder.get_connections.assert_has_calls([call()] * get_holders.call_count)
self.assertEqual(max_connection.in_flight, in_flight)
self.assertEqual(max_connection.send_msg.call_count, 0)
max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count)
holder.return_connection.assert_has_calls(
[call(max_connection)] * get_holders.call_count)
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self._deque_lock = Lock()
self._connect_socket()
self._socket.setblocking(0)
with self._libevloop._lock:
self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ, self._libevloop._loop, self.handle_read)
self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE, self._libevloop._loop, self.handle_write)
self._send_options_message()
self._libevloop.connection_created(self)
# start the global event loop if needed
self._libevloop.maybe_start()
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._thread = None
self._timers = TimerManager()
try:
dispatcher = self._loop_dispatch_class()
dispatcher.validate()
log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
except Exception:
log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
dispatcher.close()
dispatcher = _BusyWaitDispatcher()
self._loop_dispatcher = dispatcher
atexit.register(partial(_cleanup, weakref.ref(self)))
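The final line above registers an exit hook through a weak reference, so the cleanup callback can run at interpreter shutdown without keeping the loop object alive. A standalone sketch of that atexit-plus-weakref.ref pattern follows; LoopLike and shutdown() are made-up names used only for illustration.
# Illustrative sketch of the atexit + weakref.ref cleanup pattern used above.
# LoopLike and shutdown() are invented names for the purpose of the example.
import atexit
import threading
import weakref
from functools import partial

def _cleanup(loop_weakref):
    loop = loop_weakref()            # returns None if already collected
    if loop is not None:
        loop.shutdown()

class LoopLike(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._shutdown = False
        # Register cleanup via a weak reference so the registration itself
        # does not keep this object alive until interpreter exit.
        atexit.register(partial(_cleanup, weakref.ref(self)))

    def shutdown(self):
        with self._lock:
            self._shutdown = True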
def __init__(self, host, host_distance, session):
self.host = host
self.host_distance = host_distance
self._session = weakref.proxy(session)
self._lock = Lock()
# this is used in conjunction with the connection streams. Not using the connection lock because the connection can be replaced in the lifetime of the pool.
self._stream_available_condition = Condition(self._lock)
self._is_replacing = False
if host_distance == HostDistance.IGNORED:
log.debug("Not opening connection to ignored host %s", self.host)
return
elif host_distance == HostDistance.REMOTE and not session.cluster.connect_to_remote_hosts:
log.debug("Not opening connection to remote host %s", self.host)
return
log.debug("Initializing connection for host %s", self.host)
self._connection = session.cluster.connection_factory(host.address)
self._keyspace = session.keyspace
if self._keyspace:
self._connection.set_keyspace_blocking(self._keyspace)
log.debug("Finished initializing connection for host %s", self.host)
def __init__(self, cluster, timeout,
schema_event_refresh_window,
topology_event_refresh_window,
status_event_refresh_window,
schema_meta_enabled=True,
token_meta_enabled=True):
# use a weak reference to allow the Cluster instance to be GC'ed (and
# shutdown) since implementing __del__ disables the cycle detector
self._cluster = weakref.proxy(cluster)
self._connection = None
self._timeout = timeout
self._schema_event_refresh_window = schema_event_refresh_window
self._topology_event_refresh_window = topology_event_refresh_window
self._status_event_refresh_window = status_event_refresh_window
self._schema_meta_enabled = schema_meta_enabled
self._token_meta_enabled = token_meta_enabled
self._lock = RLock()
self._schema_agreement_lock = Lock()
self._reconnection_handler = None
self._reconnection_lock = RLock()
self._event_schedule_times = {}
def __init__(self, maxsize=0):
try:
import threading
except ImportError:
import dummy_threading as threading
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)