def sendEvents(self, events):
    if not events:
        return

    self.state = PythonCollectionTask.STATE_SEND_EVENTS

    # Default event fields.
    for i, event in enumerate(events):
        event.setdefault('device', self.configId)
        event.setdefault('severity', ZenEventClasses.Info)

        # On CTRL-C or exit the reactor might stop before we get to this
        # call and generate a traceback.
        if reactor.running:
            self._eventService.sendEvent(event)

        # Yield back to the reactor every 100 events so other queued work
        # is not starved (a standalone sketch of this pattern follows).
        if i % 100 == 99:
            yield task.deferLater(reactor, 0, lambda: None)
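As a standalone illustration of the chunked-yield pattern used above, here is a minimal sketch of an inlineCallbacks generator that hands control back to the reactor between chunks of work; the function and parameter names are illustrative, not taken from the snippet.

from twisted.internet import defer, reactor, task


@defer.inlineCallbacks
def process_in_chunks(items, handle, chunk_size=100):
    """Process items, yielding to the reactor after every chunk_size items."""
    for i, item in enumerate(items):
        handle(item)  # hypothetical per-item callable supplied by the caller
        if i % chunk_size == chunk_size - 1:
            # deferLater(reactor, 0, ...) fires on a later reactor iteration,
            # so queued events and timers get a chance to run in between.
            yield task.deferLater(reactor, 0, lambda: None)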
def test_check_daemons(self):
    """
    The daemons are checked to be running every so often. When five
    consecutive checks fail, the failing daemon is restarted.
    """
    clock = Clock()
    dog = WatchDog(clock,
                   broker=AsynchronousPingDaemon("test-broker"),
                   monitor=AsynchronousPingDaemon("test-monitor"),
                   manager=AsynchronousPingDaemon("test-manager"))
    dog.start_monitoring()

    for i in range(4):
        clock.advance(5)
        dog.broker.fire_running(False)
        dog.monitor.fire_running(True)
        dog.manager.fire_running(True)
        self.assertEqual(dog.broker.boots, [])

    clock.advance(5)
    dog.broker.fire_running(False)
    dog.monitor.fire_running(True)
    dog.manager.fire_running(True)
    self.assertEqual(dog.broker.boots, [STOP, START])
def test_dont_write_pid_file_until_we_really_start(
        self, mock_watchdog, mock_daemonize, mock_reactor):
    """
    If the client can't be started because another client is still running,
    the client shouldn't be daemonized and the pid file shouldn't be
    written.
    """
    mock_watchdog().check_running.return_value = succeed([StubDaemon()])
    mock_reactor.crash.return_value = None
    self.log_helper.ignore_errors(
        "ERROR: The following daemons are already running: program-name")
    pid_file = self.makeFile()

    self.configuration.daemon = True
    self.configuration.pid_file = pid_file

    service = WatchDogService(self.configuration)
    service.startService()
    self.assertFalse(os.path.exists(pid_file))
    mock_daemonize.assert_not_called()
    mock_watchdog().check_running.assert_called_once_with()
    mock_watchdog().start.assert_not_called()
    mock_reactor.crash.assert_called_once_with()
def disconnected(self, daemon):
    self.mainwin.set_sensitive(False)

    # If the reactor is not running at this point it means that we were
    # closed normally.
    if not reactor.running:
        return

    self.save_settings()

    msg = _("Lost connection with the epoptes service.")
    msg += "\n\n" + _("Make sure the service is running and then restart epoptes.")
    dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK,
                            message_format=msg)
    dlg.set_title(_('Service connection error'))
    dlg.run()
    dlg.destroy()
    reactor.stop()
# AMP callbacks
def _puller(self):
    logger.debug('Starting puller loop')
    while True:
        if not reactor.running or self._stop:
            logger.debug('Puller loop dying')
            reactor.callFromThread(self.stopped.callback, None)
            return
        channels = [self.send_channel] + list(self._pull_channels)
        if not channels:
            time.sleep(0.05)
            continue
        channel, message = self.channel_layer.receive(channels, block=False)
        if not channel:
            time.sleep(0.01)
            continue
        logger.debug('We got message on channel: %s' % (channel,))
        reactor.callFromThread(self.handle_reply, channel, message)
def test_run_script(self):
    """
    We run the script specified in the usage options and take whatever
    is printed to stdout as the results of the test.
    """
    processProtocol = ScriptProcessProtocol(self)

    interpreter = self.localOptions['interpreter']
    if not which(interpreter):
        log.err('Unable to find %s executable in PATH.' % interpreter)
        return

    reactor.spawnProcess(processProtocol,
                         interpreter,
                         args=[interpreter, self.localOptions['script']],
                         env={'HOME': os.environ['HOME']},
                         usePTY=True)

    if not reactor.running:
        reactor.run()

    return processProtocol.deferred
def execute_from_command_line():
    # Limit concurrency in all thread-pools to ONE.
    from maasserver.utils import threads
    threads.install_default_pool(maxthreads=1)
    threads.install_database_unpool(maxthreads=1)
    # Disable all database connections in the reactor.
    from maasserver.utils import orm
    from twisted.internet import reactor
    assert not reactor.running, "The reactor has been started too early."
    reactor.callFromThread(orm.disable_all_database_connections)
    # Configure logging; Django is no longer responsible for this. Behave as
    # if we're always at an interactive terminal (i.e. do not wrap stdout or
    # stderr with log machinery).
    from provisioningserver import logger
    logger.configure(mode=logger.LoggingMode.COMMAND)
    # Hand over to Django.
    from django.core import management
    management.execute_from_command_line()
def run(self, handler):
    from twisted.web import server, wsgi
    from twisted.python.threadpool import ThreadPool
    from twisted.internet import reactor
    thread_pool = ThreadPool()
    thread_pool.start()
    reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
    factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
    reactor.listenTCP(self.port, factory, interface=self.host)
    if not reactor.running:
        reactor.run()
def maybe_start(self):
    with self._lock:
        if not reactor.running:
            self._thread = Thread(target=reactor.run,
                                  name="cassandra_driver_event_loop",
                                  kwargs={'installSignalHandlers': False})
            self._thread.daemon = True
            self._thread.start()
            atexit.register(partial(_cleanup, weakref.ref(self)))
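Once the reactor runs in a background daemon thread like this, other threads hand work to it with reactor.callFromThread rather than calling reactor APIs directly. A minimal hedged sketch; the helper name is illustrative and not part of the driver code.

from twisted.internet import reactor


def call_on_reactor_thread(fn, *args, **kwargs):
    """Schedule fn(*args, **kwargs) to run on the reactor's own thread."""
    # callFromThread is the documented thread-safe way to hand work to a
    # reactor that is running in another thread.
    reactor.callFromThread(fn, *args, **kwargs)


# Example: stop the background reactor from the main thread at shutdown.
# call_on_reactor_thread(reactor.stop)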
def clientConnectionLost(self, connector, reason):
    print 'Connection lost. Reason:', reason
    if reactor.running:
        reactor.stop()
def whoops(err):
    if reactor.running:
        log.err(err)
        reactor.stop()
def onDisconnect(self):
    if reactor.running:
        reactor.stop()
def run(self) -> None:
    log.startLogging(open(datetime.now().strftime('logs/websocket_%d_%m_%Y.log'), 'a'))
    factory = MyClientFactory(self.URL)
    factory.protocol = self.protocol
    connectWS(factory)

    default_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    reactor.run()
    signal.signal(signal.SIGINT, default_handler)

    if reactor.running:
        reactor.stop()
def start(self, raw_args):
    # XXX careful! Should check whether the process recorded in the PID
    # file is still running and avoid launching it again (a sketch of
    # such a check follows this method).
    import commands
    cmd = 'bitmaskd'
    if raw_args and ('--verbose' in raw_args or '-v' in raw_args):
        cmd += ' --verbose'
    commands.getoutput(cmd)
    self.cfg.set('bonafide', 'active', "")
    return defer.succeed(None)
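A hedged sketch of the check the XXX comment asks for, assuming a hypothetical pidfile path: os.kill(pid, 0) delivers no signal and only tests whether the process exists.

import errno
import os


def _process_is_running(pidfile='/tmp/bitmaskd.pid'):
    """Return True if the PID recorded in pidfile maps to a live process."""
    try:
        with open(pidfile) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False
    try:
        os.kill(pid, 0)  # signal 0: existence check, nothing is delivered
    except OSError as e:
        # EPERM means the process exists but is owned by another user.
        return e.errno == errno.EPERM
    return True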
def main():
    def signal_handler(signal, frame):
        if reactor.running:
            reactor.stop()
        sys.exit(0)

    reactor.callWhenRunning(reactor.callLater, 0, execute)
    signal.signal(signal.SIGINT, signal_handler)
    reactor.run()
def is_running(self):
    self.pings += 1
    if self.deferred is not None:
        raise AssertionError(
            "is_running called while it's already running!")
    self.deferred = Deferred()
    return self.deferred
def test_die_when_broker_unavailable(self):
    """
    If the broker is not running, the client should still be able to shut
    down.
    """
    self.log_helper.ignore_errors(
        "Couldn't request that broker gracefully shut down; "
        "killing forcefully.")
    clock = Clock()
    dog = WatchDog(clock,
                   broker=BoringDaemon("test-broker"),
                   monitor=BoringDaemon("test-monitor"),
                   manager=BoringDaemon("test-manager"))

    # request_exit returns False when there's no broker, as tested by
    # DaemonTest.test_request_exit_without_broker
    dog.broker.request_exit = lambda: succeed(False)
    # The manager's wait method never fires its deferred, because the
    # broker is dead and nothing ever tells the manager to die.
    manager_result = Deferred()
    dog.manager.wait = lambda: manager_result

    def stop():
        manager_result.callback(True)
        return succeed(True)
    dog.manager.stop = stop

    result = dog.request_exit()
    return result
def test_simulate_broker_not_starting_up(self):
    """
    When a daemon repeatedly dies, the watchdog gives up entirely and shuts
    down.
    """
    stop = []
    stopped = []
    self.log_helper.ignore_errors("Can't keep landscape-broker running. "
                                  "Exiting.")
    output_filename = self.makeFile("NOT RUN")

    self._write_script(
        "#!/bin/sh\necho RUN >> %s" % output_filename)

    def got_result(result):
        self.assertEqual(len(list(open(output_filename))),
                         MAXIMUM_CONSECUTIVE_RESTARTS)
        self.assertTrue("Can't keep landscape-broker running." in
                        self.logfile.getvalue())
        self.assertCountEqual([True], stopped)
        reactor.stop = stop[0]

    result = Deferred()
    result.addCallback(got_result)

    def mock_reactor_stop():
        stop.append(reactor.stop)
        reactor.stop = lambda: stopped.append(True)

    reactor.callLater(0, mock_reactor_stop)
    reactor.callLater(1, result.callback, None)

    daemon = self.get_daemon(reactor=reactor)
    daemon.BIN_DIR = self.config.bindir
    daemon.start()

    return result
def test_spawn_process_same_uid(self, getuid, getgid):
    """
    If the daemon is specified to run as root, and the watchdog is running
    as root, no uid or gid switching will occur.
    """
    self._write_script("#!/bin/sh")

    reactor = mock.Mock()
    daemon = self.get_daemon(reactor=reactor, username="root")
    daemon.BIN_DIR = self.config.bindir
    daemon.start()
    reactor.spawnProcess.assert_called_with(
        mock.ANY, mock.ANY, args=mock.ANY, env=mock.ANY, uid=None,
        gid=None)
def test_start_service_exits_when_already_running(
        self, mock_bootstrap_list, mock_reactor):
    self.log_helper.ignore_errors(
        "ERROR: The following daemons are already running: program-name")
    service = WatchDogService(self.configuration)

    service.watchdog = mock.Mock()
    service.watchdog.check_running.return_value = succeed([StubDaemon()])
    result = service.startService()
    self.assertEqual(service.exit_code, 1)
    mock_bootstrap_list.bootstrap.assert_called_once_with(
        data_path=self.data_path, log_dir=self.log_dir)
    service.watchdog.check_running.assert_called_once_with()
    self.assertTrue(mock_reactor.crash.called)
    return result
def run(self):
    self.running = True
def test_landscape_user(self):
    """
    The watchdog *can* be run as the 'landscape' user.
    """
    self.fake_pwd.addUser(
        "landscape", None, os.getuid(), None, None, None, None)
    reactor = FakeReactor()
    with mock.patch("landscape.client.watchdog.pwd", new=self.fake_pwd):
        run(["--log-dir", self.makeDir()], reactor=reactor)

    self.assertTrue(reactor.running)
def stopTransmissions(self, widget):
    self.execOnClients(['stop_receptions'], self.cstore,
                       mode=EM_SYSTEM_AND_SESSION)
    if self.vncserver is not None:
        self.vncserver.kill()
        self.vncserver = None
## FIXME FIXME: Should we allow for running arbitrary commands in clients?
def run(cls):
    runner = CrawlerRunner(get_project_settings())

    @defer.inlineCallbacks
    def deferred_crawl():
        for spider, args, kwargs in cls.queue:
            try:
                yield runner.crawl(spider, *args, **kwargs)
            except KeyError as err:
                # Log a warning if the scraper name is invalid instead of
                # causing the job to fail.
                # NOTE: If there is any other type of error, the job will
                # fail, and all the jobs that depend on it will fail as
                # well.
                logger.warning(err.args[0])

        # XXX: If all the names fail, then trying to run `reactor.stop()`
        # will give an "Unhandled error in Deferred" complaint and hang.
        # It will also hang in general if no spiders have been run. There
        # is presumably a cleaner Twisted way to handle this (one option
        # is sketched after this snippet); for now, just log an error.
        if reactor.running:
            reactor.stop()
        else:
            logger.critical("LocalQueue: No valid scraper names found.")

    deferred_crawl()
    reactor.run()
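One common alternative to stopping the reactor from inside the crawl generator, sketched here under the assumption that the generator itself no longer calls reactor.stop(): start the crawl once the reactor is running and attach the shutdown with addBoth so it fires on success and on failure alike. The names runner, jobs, crawl_all and run_queue are illustrative, not the project's own code.

from twisted.internet import defer, reactor


@defer.inlineCallbacks
def crawl_all(runner, jobs):
    # jobs: iterable of (spider, args, kwargs) tuples; an illustrative
    # stand-in for cls.queue in the snippet above.
    for spider, args, kwargs in jobs:
        yield runner.crawl(spider, *args, **kwargs)


def run_queue(runner, jobs):
    def _start():
        d = crawl_all(runner, jobs)
        # addBoth fires whether the crawls succeeded or failed, so the
        # reactor always stops and the process cannot hang on an error.
        d.addBoth(lambda _: reactor.stop())

    # Starting the crawl only after the reactor is running guarantees that
    # reactor.stop() above always has a running reactor to stop.
    reactor.callWhenRunning(_start)
    reactor.run()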