forked from M-Labs/artiq

use Python 3.5 coroutines

Sebastien Bourdeauducq 2015-10-03 19:28:57 +08:00
parent b117b9320d
commit f552d62b69
19 changed files with 228 additions and 304 deletions
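The change is mechanical throughout: every @asyncio.coroutine generator-based coroutine becomes a native async def, and every yield from on an awaitable becomes await. The async/await syntax requires Python 3.5 or later. As a minimal, self-contained sketch of the pattern (the ExampleClient class and its methods below are illustrative stand-ins, not taken from the ARTIQ sources):

    import asyncio

    class ExampleClient:
        # Hypothetical stand-in for the RPC/subscriber classes touched by this commit.
        async def _call(self, method):
            await asyncio.sleep(0)   # placeholder for real network I/O
            return True

        # Old style (Python 3.4):
        #     @asyncio.coroutine
        #     def ping(self):
        #         ok = yield from asyncio.wait_for(self._call("ping"), 1.0)
        #         return ok
        # New style, as applied throughout this commit:
        async def ping(self):
            ok = await asyncio.wait_for(self._call("ping"), 1.0)
            return ok

    if __name__ == "__main__":
        loop = asyncio.get_event_loop()
        print(loop.run_until_complete(ExampleClient().ping()))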


@@ -56,60 +56,55 @@ class Controller:
         self.process = None
         self.launch_task = asyncio.Task(self.launcher())
 
-    @asyncio.coroutine
-    def end(self):
+    async def end(self):
         self.launch_task.cancel()
-        yield from asyncio.wait_for(self.launch_task, None)
+        await asyncio.wait_for(self.launch_task, None)
 
-    @asyncio.coroutine
-    def _call_controller(self, method):
+    async def _call_controller(self, method):
         remote = AsyncioClient()
-        yield from remote.connect_rpc(self.host, self.port, None)
+        await remote.connect_rpc(self.host, self.port, None)
         try:
             targets, _ = remote.get_rpc_id()
             remote.select_rpc_target(targets[0])
-            r = yield from getattr(remote, method)()
+            r = await getattr(remote, method)()
         finally:
             remote.close_rpc()
         return r
 
-    @asyncio.coroutine
-    def _ping(self):
+    async def _ping(self):
         try:
-            ok = yield from asyncio.wait_for(self._call_controller("ping"),
+            ok = await asyncio.wait_for(self._call_controller("ping"),
                                         self.ping_timeout)
             if ok:
                 self.retry_timer_cur = self.retry_timer
             return ok
         except:
             return False
 
-    @asyncio.coroutine
-    def _wait_and_ping(self):
+    async def _wait_and_ping(self):
         while True:
             try:
-                yield from asyncio.wait_for(self.process.wait(),
+                await asyncio.wait_for(self.process.wait(),
                                        self.ping_timer)
             except asyncio.TimeoutError:
                 logger.debug("pinging controller %s", self.name)
-                ok = yield from self._ping()
+                ok = await self._ping()
                 if not ok:
                     logger.warning("Controller %s ping failed", self.name)
-                    yield from self._terminate()
+                    await self._terminate()
                     return
             else:
                 break
 
-    @asyncio.coroutine
-    def launcher(self):
+    async def launcher(self):
         try:
             while True:
                 logger.info("Starting controller %s with command: %s",
                             self.name, self.command)
                 try:
-                    self.process = yield from asyncio.create_subprocess_exec(
+                    self.process = await asyncio.create_subprocess_exec(
                         *shlex.split(self.command))
-                    yield from self._wait_and_ping()
+                    await self._wait_and_ping()
                 except FileNotFoundError:
                     logger.warning("Controller %s failed to start", self.name)
                 else:
@@ -117,33 +112,32 @@ class Controller:
                 logger.warning("Restarting in %.1f seconds",
                                self.retry_timer_cur)
                 try:
-                    yield from asyncio.wait_for(self.retry_now.wait(),
+                    await asyncio.wait_for(self.retry_now.wait(),
                                            self.retry_timer_cur)
                 except asyncio.TimeoutError:
                     pass
                 self.retry_timer_cur *= self.retry_timer_backoff
         except asyncio.CancelledError:
-            yield from self._terminate()
+            await self._terminate()
 
-    @asyncio.coroutine
-    def _terminate(self):
+    async def _terminate(self):
         logger.info("Terminating controller %s", self.name)
         if self.process is not None and self.process.returncode is None:
             try:
-                yield from asyncio.wait_for(self._call_controller("terminate"),
+                await asyncio.wait_for(self._call_controller("terminate"),
                                        self.term_timeout)
             except:
                 logger.warning("Controller %s did not respond to terminate "
                                "command, killing", self.name)
                 self.process.kill()
             try:
-                yield from asyncio.wait_for(self.process.wait(),
+                await asyncio.wait_for(self.process.wait(),
                                        self.term_timeout)
             except:
                 logger.warning("Controller %s failed to exit, killing",
                                self.name)
                 self.process.kill()
-            yield from self.process.wait()
+            await self.process.wait()
         logger.debug("Controller %s terminated", self.name)
@@ -163,17 +157,16 @@ class Controllers:
         self.active = dict()
         self.process_task = asyncio.Task(self._process())
 
-    @asyncio.coroutine
-    def _process(self):
+    async def _process(self):
         while True:
-            action, param = yield from self.queue.get()
+            action, param = await self.queue.get()
             if action == "set":
                 k, ddb_entry = param
                 if k in self.active:
-                    yield from self.active[k].end()
+                    await self.active[k].end()
                 self.active[k] = Controller(k, ddb_entry)
             elif action == "del":
-                yield from self.active[param].end()
+                await self.active[param].end()
                 del self.active[param]
             else:
                 raise ValueError
@@ -196,11 +189,10 @@ class Controllers:
         for name in set(self.active_or_queued):
             del self[name]
 
-    @asyncio.coroutine
-    def shutdown(self):
+    async def shutdown(self):
         self.process_task.cancel()
         for c in self.active.values():
-            yield from c.end()
+            await c.end()
 
 
 class ControllerDB:
@@ -225,8 +217,7 @@ class ControllerManager(TaskObject):
         self.retry_master = retry_master
         self.controller_db = ControllerDB()
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         try:
             subscriber = Subscriber("devices",
                                     self.controller_db.sync_struct_init)
@@ -236,12 +227,12 @@ class ControllerManager(TaskObject):
                     s = subscriber.writer.get_extra_info("socket")
                     localhost = s.getsockname()[0]
                     self.controller_db.set_host_filter(localhost)
-                yield from subscriber.connect(self.server, self.port,
+                await subscriber.connect(self.server, self.port,
                                          set_host_filter)
                 try:
-                    yield from asyncio.wait_for(subscriber.receive_task, None)
+                    await asyncio.wait_for(subscriber.receive_task, None)
                 finally:
-                    yield from subscriber.close()
+                    await subscriber.close()
             except (ConnectionAbortedError, ConnectionError,
                     ConnectionRefusedError, ConnectionResetError) as e:
                 logger.warning("Connection to master failed (%s: %s)",
@@ -249,11 +240,11 @@ class ControllerManager(TaskObject):
                 else:
                     logger.warning("Connection to master lost")
                 logger.warning("Retrying in %.1f seconds", self.retry_master)
-                yield from asyncio.sleep(self.retry_master)
+                await asyncio.sleep(self.retry_master)
         except asyncio.CancelledError:
             pass
         finally:
-            yield from self.controller_db.current_controllers.shutdown()
+            await self.controller_db.current_controllers.shutdown()
 
     def retry_now(self, k):
         """If a controller is disabled and pending retry, perform that retry


@@ -96,24 +96,23 @@ class DBWriter(TaskObject):
             logger.warning("failed to update parameter '%s': "
                            "too many pending updates", k)
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         while True:
-            k, v = yield from self._queue.get()
+            k, v = await self._queue.get()
             url = self.base_url + "/write"
             params = {"u": self.user, "p": self.password, "db": self.database,
                       "consistency": "any", "precision": "n"}
             fmt_ty, fmt_v = format_influxdb(v)
             data = "{},parameter={} {}={}".format(self.table, k, fmt_ty, fmt_v)
             try:
-                response = yield from aiohttp.request(
+                response = await aiohttp.request(
                     "POST", url, params=params, data=data)
             except:
                 logger.warning("got exception trying to update '%s'",
                                k, exc_info=True)
             else:
                 if response.status not in (200, 204):
-                    content = (yield from response.content.read()).decode()
+                    content = (await response.content.read()).decode()
                     if content:
                         content = content[:-1]  # drop \n
                     logger.warning("got HTTP status %d "
@@ -144,18 +143,17 @@ class MasterReader(TaskObject):
         self.filter_function = filter_function
         self.writer = writer
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         subscriber = Subscriber(
             "parameters",
             partial(Parameters, self.filter_function, self.writer))
         while True:
             try:
-                yield from subscriber.connect(self.server, self.port)
+                await subscriber.connect(self.server, self.port)
                 try:
-                    yield from asyncio.wait_for(subscriber.receive_task, None)
+                    await asyncio.wait_for(subscriber.receive_task, None)
                 finally:
-                    yield from subscriber.close()
+                    await subscriber.close()
             except (ConnectionAbortedError, ConnectionError,
                     ConnectionRefusedError, ConnectionResetError) as e:
                 logger.warning("Connection to master failed (%s: %s)",
@@ -163,7 +161,7 @@ class MasterReader(TaskObject):
             else:
                 logger.warning("Connection to master lost")
             logger.warning("Retrying in %.1f seconds", self.retry)
-            yield from asyncio.sleep(self.retry)
+            await asyncio.sleep(self.retry)
 
 
 class Filter:


@@ -300,23 +300,20 @@ class ExplorerDock(dockarea.Dock):
     def enable_duedate(self):
         self.datetime_en.setChecked(True)
 
-    @asyncio.coroutine
-    def sub_connect(self, host, port):
+    async def sub_connect(self, host, port):
         self.explist_subscriber = Subscriber("explist",
                                              self.init_explist_model)
-        yield from self.explist_subscriber.connect(host, port)
+        await self.explist_subscriber.connect(host, port)
 
-    @asyncio.coroutine
-    def sub_close(self):
-        yield from self.explist_subscriber.close()
+    async def sub_close(self):
+        await self.explist_subscriber.close()
 
     def init_explist_model(self, init):
         self.explist_model = _ExplistModel(self, self.el, init)
         self.el.setModel(self.explist_model)
         return self.explist_model
 
-    @asyncio.coroutine
-    def submit(self, pipeline_name, file, class_name, arguments,
+    async def submit(self, pipeline_name, file, class_name, arguments,
                priority, due_date, flush):
         expid = {
             "repo_rev": None,
@@ -324,8 +321,8 @@ class ExplorerDock(dockarea.Dock):
             "class_name": class_name,
             "arguments": arguments,
         }
-        rid = yield from self.schedule_ctl.submit(pipeline_name, expid,
+        rid = await self.schedule_ctl.submit(pipeline_name, expid,
                                              priority, due_date, flush)
         self.status_bar.showMessage("Submitted RID {}".format(rid))
 
     def submit_clicked(self):


@@ -41,14 +41,12 @@ class LogDock(dockarea.Dock):
         self.addWidget(self.log)
         self.scroll_at_bottom = False
 
-    @asyncio.coroutine
-    def sub_connect(self, host, port):
+    async def sub_connect(self, host, port):
         self.subscriber = Subscriber("log", self.init_log_model)
-        yield from self.subscriber.connect(host, port)
+        await self.subscriber.connect(host, port)
 
-    @asyncio.coroutine
-    def sub_close(self):
-        yield from self.subscriber.close()
+    async def sub_close(self):
+        await self.subscriber.close()
 
     def rows_inserted_before(self):
         scrollbar = self.log.verticalScrollBar()


@@ -232,26 +232,24 @@ class MonInj(TaskObject):
         self.dm = _DeviceManager(self.send_to_device, dict())
         self.transport = None
 
-    @asyncio.coroutine
-    def start(self, server, port):
+    async def start(self, server, port):
         loop = asyncio.get_event_loop()
-        yield from loop.create_datagram_endpoint(lambda: self,
+        await loop.create_datagram_endpoint(lambda: self,
                                             family=socket.AF_INET)
         try:
-            yield from self.subscriber.connect(server, port)
+            await self.subscriber.connect(server, port)
             try:
                 TaskObject.start(self)
             except:
-                yield from self.subscriber.close()
+                await self.subscriber.close()
                 raise
         except:
             self.transport.close()
             raise
 
-    @asyncio.coroutine
-    def stop(self):
-        yield from TaskObject.stop(self)
-        yield from self.subscriber.close()
+    async def stop(self):
+        await TaskObject.stop(self)
+        await self.subscriber.close()
         if self.transport is not None:
             self.transport.close()
             self.transport = None
@@ -295,10 +293,9 @@ class MonInj(TaskObject):
         else:
             self.transport.sendto(data, (ca, 3250))
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         while True:
-            yield from asyncio.sleep(0.2)
+            await asyncio.sleep(0.2)
             # MONINJ_REQ_MONITOR
             self.send_to_device(b"\x01")


@@ -59,14 +59,12 @@ class ParametersDock(dockarea.Dock):
             else:
                 self.table.hideRow(row)
 
-    @asyncio.coroutine
-    def sub_connect(self, host, port):
+    async def sub_connect(self, host, port):
         self.subscriber = Subscriber("parameters", self.init_parameters_model)
-        yield from self.subscriber.connect(host, port)
+        await self.subscriber.connect(host, port)
 
-    @asyncio.coroutine
-    def sub_close(self):
-        yield from self.subscriber.close()
+    async def sub_close(self):
+        await self.subscriber.close()
 
     def init_parameters_model(self, init):
         self.table_model = ParametersModel(self.table, init)


@@ -68,15 +68,13 @@ class ResultsDock(dockarea.Dock):
     def get_result(self, key):
         return self.table_model.backing_store[key]
 
-    @asyncio.coroutine
-    def sub_connect(self, host, port):
+    async def sub_connect(self, host, port):
         self.subscriber = Subscriber("rt_results", self.init_results_model,
                                      self.on_mod)
-        yield from self.subscriber.connect(host, port)
+        await self.subscriber.connect(host, port)
 
-    @asyncio.coroutine
-    def sub_close(self):
-        yield from self.subscriber.close()
+    async def sub_close(self):
+        await self.subscriber.close()
 
     def init_results_model(self, init):
         self.table_model = ResultsModel(self.table, init)


@@ -75,23 +75,20 @@ class ScheduleDock(dockarea.Dock):
         delete_action.triggered.connect(self.delete_clicked)
         self.table.addAction(delete_action)
 
-    @asyncio.coroutine
-    def sub_connect(self, host, port):
+    async def sub_connect(self, host, port):
         self.subscriber = Subscriber("schedule", self.init_schedule_model)
-        yield from self.subscriber.connect(host, port)
+        await self.subscriber.connect(host, port)
 
-    @asyncio.coroutine
-    def sub_close(self):
-        yield from self.subscriber.close()
+    async def sub_close(self):
+        await self.subscriber.close()
 
     def init_schedule_model(self, init):
         self.table_model = _ScheduleModel(self.table, init)
         self.table.setModel(self.table_model)
         return self.table_model
 
-    @asyncio.coroutine
-    def delete(self, rid):
-        yield from self.schedule_ctl.delete(rid)
+    async def delete(self, rid):
+        await self.schedule_ctl.delete(rid)
 
     def delete_clicked(self):
         idx = self.table.selectedIndexes()


@@ -69,11 +69,10 @@ class StateManager(TaskObject):
                                exc_info=True)
         pyon.store_file(self.filename, data)
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         try:
             while True:
-                yield from asyncio.sleep(self.autosave_period)
+                await asyncio.sleep(self.autosave_period)
                 self.save()
         finally:
             self.save()


@@ -12,17 +12,16 @@ from artiq.tools import exc_to_warning
 logger = logging.getLogger(__name__)
 
 
-@asyncio.coroutine
-def _scan_experiments(wd, log):
+async def _scan_experiments(wd, log):
     r = dict()
     for f in os.listdir(wd):
         if f.endswith(".py"):
             try:
                 worker = Worker({"log": lambda message: log("scan", message)})
                 try:
-                    description = yield from worker.examine(os.path.join(wd, f))
+                    description = await worker.examine(os.path.join(wd, f))
                 finally:
-                    yield from worker.close()
+                    await worker.close()
                 for class_name, class_desc in description.items():
                     name = class_desc["name"]
                     arguments = class_desc["arguments"]
@@ -68,8 +67,7 @@ class Repository:
         # The object cannot be used anymore after calling this method.
         self.backend.release_rev(self.cur_rev)
 
-    @asyncio.coroutine
-    def scan(self, new_cur_rev=None):
+    async def scan(self, new_cur_rev=None):
         if self._scanning:
             return
         self._scanning = True
@@ -79,7 +77,7 @@ class Repository:
             wd, _ = self.backend.request_rev(new_cur_rev)
             self.backend.release_rev(self.cur_rev)
             self.cur_rev = new_cur_rev
-            new_explist = yield from _scan_experiments(wd, self.log_fn)
+            new_explist = await _scan_experiments(wd, self.log_fn)
 
             _sync_explist(self.explist, new_explist)
         finally:


@@ -24,13 +24,12 @@ class RunStatus(Enum):
 def _mk_worker_method(name):
-    @asyncio.coroutine
-    def worker_method(self, *args, **kwargs):
+    async def worker_method(self, *args, **kwargs):
         if self.worker.closed.is_set():
             return True
         m = getattr(self.worker, name)
         try:
-            return (yield from m(*args, **kwargs))
+            return await m(*args, **kwargs)
         except Exception as e:
             if isinstance(e, asyncio.CancelledError):
                 raise
@@ -97,19 +96,17 @@ class Run:
             runnable = 1
         return (runnable, self.priority, due_date_k, -self.rid)
 
-    @asyncio.coroutine
-    def close(self):
+    async def close(self):
         # called through pool
-        yield from self.worker.close()
+        await self.worker.close()
         del self._notifier[self.rid]
 
     _build = _mk_worker_method("build")
 
-    @asyncio.coroutine
-    def build(self):
-        yield from self._build(self.rid, self.pipeline_name,
-                               self.wd, self.expid,
-                               self.priority)
+    async def build(self):
+        await self._build(self.rid, self.pipeline_name,
+                          self.wd, self.expid,
+                          self.priority)
 
     prepare = _mk_worker_method("prepare")
     run = _mk_worker_method("run")
@@ -154,13 +151,12 @@ class RunPool:
         self.state_changed.notify()
         return rid
 
-    @asyncio.coroutine
-    def delete(self, rid):
+    async def delete(self, rid):
         # called through deleter
         if rid not in self.runs:
             return
         run = self.runs[rid]
-        yield from run.close()
+        await run.close()
         if "repo_rev" in run.expid:
             self.repo_backend.release_rev(run.expid["repo_rev"])
         del self.runs[rid]
@@ -203,14 +199,13 @@ class PrepareStage(TaskObject):
         else:
             return candidate.due_date - now
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         while True:
             run = self._get_run()
             if run is None:
-                yield from self.pool.state_changed.wait()
+                await self.pool.state_changed.wait()
             elif isinstance(run, float):
-                yield from asyncio_wait_or_cancel([self.pool.state_changed.wait()],
+                await asyncio_wait_or_cancel([self.pool.state_changed.wait()],
                                              timeout=run)
             else:
                 if run.flush:
@@ -221,7 +216,7 @@ class PrepareStage(TaskObject):
                                   for r in self.pool.runs.values()):
                         ev = [self.pool.state_changed.wait(),
                               run.worker.closed.wait()]
-                        yield from asyncio_wait_or_cancel(
+                        await asyncio_wait_or_cancel(
                             ev, return_when=asyncio.FIRST_COMPLETED)
                         if run.worker.closed.is_set():
                             break
@@ -229,8 +224,8 @@ class PrepareStage(TaskObject):
                         continue
                 run.status = RunStatus.preparing
                 try:
-                    yield from run.build()
-                    yield from run.prepare()
+                    await run.build()
+                    await run.prepare()
                 except:
                     logger.warning("got worker exception in prepare stage, "
                                    "deleting RID %d",
@@ -255,8 +250,7 @@ class RunStage(TaskObject):
                 r = None
         return r
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         stack = []
 
         while True:
@@ -265,7 +259,7 @@ class RunStage(TaskObject):
                     next_irun is not None and
                     next_irun.priority_key() > stack[-1].priority_key()):
                 while next_irun is None:
-                    yield from self.pool.state_changed.wait()
+                    await self.pool.state_changed.wait()
                     next_irun = self._get_run()
                 stack.append(next_irun)
@@ -273,10 +267,10 @@ class RunStage(TaskObject):
             try:
                 if run.status == RunStatus.paused:
                     run.status = RunStatus.running
-                    completed = yield from run.resume()
+                    completed = await run.resume()
                 else:
                     run.status = RunStatus.running
-                    completed = yield from run.run()
+                    completed = await run.run()
             except:
                 logger.warning("got worker exception in run stage, "
                                "deleting RID %d",
@@ -305,17 +299,16 @@ class AnalyzeStage(TaskObject):
                 r = None
         return r
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         while True:
             run = self._get_run()
             while run is None:
-                yield from self.pool.state_changed.wait()
+                await self.pool.state_changed.wait()
                 run = self._get_run()
             run.status = RunStatus.analyzing
             try:
-                yield from run.analyze()
-                yield from run.write_results()
+                await run.analyze()
+                await run.write_results()
             except:
                 logger.warning("got worker exception in analyze stage, "
                                "deleting RID %d",
@@ -337,12 +330,11 @@ class Pipeline:
         self._run.start()
         self._analyze.start()
 
-    @asyncio.coroutine
-    def stop(self):
+    async def stop(self):
         # NB: restart of a stopped pipeline is not supported
-        yield from self._analyze.stop()
-        yield from self._run.stop()
-        yield from self._prepare.stop()
+        await self._analyze.stop()
+        await self._run.stop()
+        await self._prepare.stop()
 
 
 class Deleter(TaskObject):
@@ -358,36 +350,32 @@ class Deleter(TaskObject):
                 break
             self._queue.put_nowait(rid)
 
-    @asyncio.coroutine
-    def join(self):
-        yield from self._queue.join()
+    async def join(self):
+        await self._queue.join()
 
-    @asyncio.coroutine
-    def _delete(self, rid):
+    async def _delete(self, rid):
         for pipeline in self._pipelines.values():
             if rid in pipeline.pool.runs:
                 logger.debug("deleting RID %d...", rid)
-                yield from pipeline.pool.delete(rid)
+                await pipeline.pool.delete(rid)
                 logger.debug("deletion of RID %d completed", rid)
                 break
 
-    @asyncio.coroutine
-    def _gc_pipelines(self):
+    async def _gc_pipelines(self):
         pipeline_names = list(self._pipelines.keys())
         for name in pipeline_names:
             if not self._pipelines[name].pool.runs:
                 logger.debug("garbage-collecting pipeline '%s'...", name)
-                yield from self._pipelines[name].stop()
+                await self._pipelines[name].stop()
                 del self._pipelines[name]
                 logger.debug("garbage-collection of pipeline '%s' completed",
                              name)
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         while True:
-            rid = yield from self._queue.get()
-            yield from self._delete(rid)
-            yield from self._gc_pipelines()
+            rid = await self._queue.get()
+            await self._delete(rid)
+            await self._gc_pipelines()
             self._queue.task_done()
@@ -406,15 +394,14 @@ class Scheduler:
     def start(self):
         self._deleter.start()
 
-    @asyncio.coroutine
-    def stop(self):
+    async def stop(self):
         # NB: restart of a stopped scheduler is not supported
         self._terminated = True  # prevent further runs from being created
         for pipeline in self._pipelines.values():
             for rid in pipeline.pool.runs.keys():
                 self._deleter.delete(rid)
-        yield from self._deleter.join()
-        yield from self._deleter.stop()
+        await self._deleter.join()
+        await self._deleter.stop()
         if self._pipelines:
             logger.warning("some pipelines were not garbage-collected")


@@ -56,27 +56,25 @@ class Worker:
         else:
             return None
 
-    @asyncio.coroutine
-    def _create_process(self):
-        yield from self.io_lock.acquire()
+    async def _create_process(self):
+        await self.io_lock.acquire()
         try:
             if self.closed.is_set():
                 raise WorkerError("Attempting to create process after close")
-            self.process = yield from asyncio.create_subprocess_exec(
+            self.process = await asyncio.create_subprocess_exec(
                 sys.executable, "-m", "artiq.master.worker_impl",
                 stdout=subprocess.PIPE, stdin=subprocess.PIPE)
         finally:
             self.io_lock.release()
 
-    @asyncio.coroutine
-    def close(self, term_timeout=1.0):
+    async def close(self, term_timeout=1.0):
         """Interrupts any I/O with the worker process and terminates the
         worker process.
 
        This method should always be called by the user to clean up, even if
        build() or examine() raises an exception."""
         self.closed.set()
-        yield from self.io_lock.acquire()
+        await self.io_lock.acquire()
         try:
             if self.process is None:
                 # Note the %s - self.rid can be None
@@ -91,26 +89,25 @@ class Worker:
                 return
             obj = {"action": "terminate"}
             try:
-                yield from self._send(obj, cancellable=False)
+                await self._send(obj, cancellable=False)
             except:
                 logger.warning("failed to send terminate command to worker"
                                " (RID %s), killing", self.rid, exc_info=True)
                 self.process.kill()
-                yield from self.process.wait()
+                await self.process.wait()
                 return
             try:
-                yield from asyncio.wait_for(self.process.wait(), term_timeout)
+                await asyncio.wait_for(self.process.wait(), term_timeout)
             except asyncio.TimeoutError:
                 logger.warning("worker did not exit (RID %s), killing", self.rid)
                 self.process.kill()
-                yield from self.process.wait()
+                await self.process.wait()
             else:
                 logger.debug("worker exited gracefully (RID %s)", self.rid)
         finally:
             self.io_lock.release()
 
-    @asyncio.coroutine
-    def _send(self, obj, cancellable=True):
+    async def _send(self, obj, cancellable=True):
         assert self.io_lock.locked()
         line = pyon.encode(obj)
         self.process.stdin.write(line.encode())
@@ -118,7 +115,7 @@ class Worker:
         ifs = [self.process.stdin.drain()]
         if cancellable:
             ifs.append(self.closed.wait())
-        fs = yield from asyncio_wait_or_cancel(
+        fs = await asyncio_wait_or_cancel(
             ifs, timeout=self.send_timeout,
             return_when=asyncio.FIRST_COMPLETED)
         if all(f.cancelled() for f in fs):
@@ -129,10 +126,9 @@ class Worker:
         if cancellable and self.closed.is_set():
             raise WorkerError("Data transmission to worker cancelled")
 
-    @asyncio.coroutine
-    def _recv(self, timeout):
+    async def _recv(self, timeout):
         assert self.io_lock.locked()
-        fs = yield from asyncio_wait_or_cancel(
+        fs = await asyncio_wait_or_cancel(
             [self.process.stdout.readline(), self.closed.wait()],
             timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
         if all(f.cancelled() for f in fs):
@@ -148,13 +144,12 @@ class Worker:
             raise WorkerError("Worker sent invalid PYON data")
         return obj
 
-    @asyncio.coroutine
-    def _handle_worker_requests(self):
+    async def _handle_worker_requests(self):
         while True:
             try:
-                yield from self.io_lock.acquire()
+                await self.io_lock.acquire()
                 try:
-                    obj = yield from self._recv(self.watchdog_time())
+                    obj = await self._recv(self.watchdog_time())
                 finally:
                     self.io_lock.release()
             except WorkerTimeout:
@@ -181,24 +176,23 @@ class Worker:
            except:
                 reply = {"status": "failed",
                          "message": traceback.format_exc()}
-            yield from self.io_lock.acquire()
+            await self.io_lock.acquire()
            try:
-                yield from self._send(reply)
+                await self._send(reply)
            finally:
                 self.io_lock.release()
 
-    @asyncio.coroutine
-    def _worker_action(self, obj, timeout=None):
+    async def _worker_action(self, obj, timeout=None):
         if timeout is not None:
             self.watchdogs[-1] = time.monotonic() + timeout
         try:
-            yield from self.io_lock.acquire()
+            await self.io_lock.acquire()
             try:
-                yield from self._send(obj)
+                await self._send(obj)
             finally:
                 self.io_lock.release()
             try:
-                completed = yield from self._handle_worker_requests()
+                completed = await self._handle_worker_requests()
             except WorkerTimeout:
                 raise WorkerWatchdogTimeout
             finally:
@@ -206,11 +200,10 @@ class Worker:
                 del self.watchdogs[-1]
         return completed
 
-    @asyncio.coroutine
-    def build(self, rid, pipeline_name, wd, expid, priority, timeout=15.0):
+    async def build(self, rid, pipeline_name, wd, expid, priority, timeout=15.0):
         self.rid = rid
-        yield from self._create_process()
-        yield from self._worker_action(
+        await self._create_process()
+        await self._worker_action(
             {"action": "build",
              "rid": rid,
             "pipeline_name": pipeline_name,
@@ -219,45 +212,39 @@ class Worker:
             "priority": priority},
            timeout)
 
-    @asyncio.coroutine
-    def prepare(self):
-        yield from self._worker_action({"action": "prepare"})
+    async def prepare(self):
+        await self._worker_action({"action": "prepare"})
 
-    @asyncio.coroutine
-    def run(self):
-        completed = yield from self._worker_action({"action": "run"})
+    async def run(self):
+        completed = await self._worker_action({"action": "run"})
         if not completed:
             self.yield_time = time.monotonic()
         return completed
 
-    @asyncio.coroutine
-    def resume(self):
+    async def resume(self):
         stop_duration = time.monotonic() - self.yield_time
         for wid, expiry in self.watchdogs:
             self.watchdogs[wid] += stop_duration
-        completed = yield from self._worker_action({"status": "ok",
+        completed = await self._worker_action({"status": "ok",
                                                "data": None})
         if not completed:
             self.yield_time = time.monotonic()
         return completed
 
-    @asyncio.coroutine
-    def analyze(self):
-        yield from self._worker_action({"action": "analyze"})
+    async def analyze(self):
+        await self._worker_action({"action": "analyze"})
 
-    @asyncio.coroutine
-    def write_results(self, timeout=15.0):
-        yield from self._worker_action({"action": "write_results"},
-                                       timeout)
+    async def write_results(self, timeout=15.0):
+        await self._worker_action({"action": "write_results"},
+                                  timeout)
 
-    @asyncio.coroutine
-    def examine(self, file, timeout=20.0):
-        yield from self._create_process()
+    async def examine(self, file, timeout=20.0):
+        await self._create_process()
         r = dict()
 
         def register(class_name, name, arguments):
             r[class_name] = {"name": name, "arguments": arguments}
         self.register_experiment = register
-        yield from self._worker_action({"action": "examine",
+        await self._worker_action({"action": "examine",
                                    "file": file}, timeout)
         del self.register_experiment
         return r


@@ -12,8 +12,7 @@ class AsyncioServer:
     def __init__(self):
         self._client_tasks = set()
 
-    @asyncio.coroutine
-    def start(self, host, port):
+    async def start(self, host, port):
         """Starts the server.
 
         The user must call ``stop`` to free resources properly after this
@@ -26,11 +25,10 @@ class AsyncioServer:
         :param port: TCP port to bind to.
         """
-        self.server = yield from asyncio.start_server(self._handle_connection,
+        self.server = await asyncio.start_server(self._handle_connection,
                                                  host, port)
 
-    @asyncio.coroutine
-    def stop(self):
+    async def stop(self):
         """Stops the server.
         """
@@ -39,11 +37,11 @@ class AsyncioServer:
             task.cancel()
         for task in wait_for:
             try:
-                yield from asyncio.wait_for(task, None)
+                await asyncio.wait_for(task, None)
             except asyncio.CancelledError:
                 pass
         self.server.close()
-        yield from self.server.wait_closed()
+        await self.server.wait_closed()
         del self.server
 
     def _client_done(self, task):


@@ -159,16 +159,15 @@ class AsyncioClient:
         self.__target_names = None
         self.__description = None
 
-    @asyncio.coroutine
-    def connect_rpc(self, host, port, target_name):
+    async def connect_rpc(self, host, port, target_name):
         """Connects to the server. This cannot be done in __init__ because
         this method is a coroutine. See ``Client`` for a description of the
         parameters."""
         self.__reader, self.__writer = \
-            yield from asyncio.open_connection(host, port)
+            await asyncio.open_connection(host, port)
         try:
             self.__writer.write(_init_string)
-            server_identification = yield from self.__recv()
+            server_identification = await self.__recv()
             self.__target_names = server_identification["targets"]
             self.__description = server_identification["description"]
             if target_name is not None:
@@ -205,20 +204,18 @@ class AsyncioClient:
         line = pyon.encode(obj) + "\n"
         self.__writer.write(line.encode())
 
-    @asyncio.coroutine
-    def __recv(self):
-        line = yield from self.__reader.readline()
+    async def __recv(self):
+        line = await self.__reader.readline()
         return pyon.decode(line.decode())
 
-    @asyncio.coroutine
-    def __do_rpc(self, name, args, kwargs):
-        yield from self.__lock.acquire()
+    async def __do_rpc(self, name, args, kwargs):
+        await self.__lock.acquire()
         try:
             obj = {"action": "call", "name": name,
                    "args": args, "kwargs": kwargs}
             self.__send(obj)
 
-            obj = yield from self.__recv()
+            obj = await self.__recv()
             if obj["status"] == "ok":
                 return obj["ret"]
             elif obj["status"] == "failed":
@@ -229,9 +226,8 @@ class AsyncioClient:
             self.__lock.release()
 
     def __getattr__(self, name):
-        @asyncio.coroutine
-        def proxy(*args, **kwargs):
-            res = yield from self.__do_rpc(name, args, kwargs)
+        async def proxy(*args, **kwargs):
+            res = await self.__do_rpc(name, args, kwargs)
             return res
         return proxy
@@ -413,10 +409,9 @@ class Server(_AsyncioServer):
         if builtin_terminate:
             self._terminate_request = asyncio.Event()
 
-    @asyncio.coroutine
-    def _handle_connection_cr(self, reader, writer):
+    async def _handle_connection_cr(self, reader, writer):
         try:
-            line = yield from reader.readline()
+            line = await reader.readline()
             if line != _init_string:
                 return
@@ -426,7 +421,7 @@ class Server(_AsyncioServer):
             }
             line = pyon.encode(obj) + "\n"
             writer.write(line.encode())
-            line = yield from reader.readline()
+            line = await reader.readline()
             if not line:
                 return
             target_name = line.decode()[:-1]
@@ -436,7 +431,7 @@ class Server(_AsyncioServer):
                 return
 
             while True:
-                line = yield from reader.readline()
+                line = await reader.readline()
                 if not line:
                     break
                 obj = pyon.decode(line.decode())
@@ -486,9 +481,8 @@ class Server(_AsyncioServer):
         finally:
             writer.close()
 
-    @asyncio.coroutine
-    def wait_terminate(self):
-        yield from self._terminate_request.wait()
+    async def wait_terminate(self):
+        await self._terminate_request.wait()
 
 
 def simple_server_loop(targets, host, port, description=None):


@@ -61,10 +61,9 @@ class Subscriber:
             self.target_builders = [target_builder]
         self.notify_cb = notify_cb
 
-    @asyncio.coroutine
-    def connect(self, host, port, before_receive_cb=None):
+    async def connect(self, host, port, before_receive_cb=None):
         self.reader, self.writer = \
-            yield from asyncio.open_connection(host, port)
+            await asyncio.open_connection(host, port)
         try:
             if before_receive_cb is not None:
                 before_receive_cb()
@@ -77,12 +76,11 @@ class Subscriber:
             del self.writer
             raise
 
-    @asyncio.coroutine
-    def close(self):
+    async def close(self):
         try:
             self.receive_task.cancel()
             try:
-                yield from asyncio.wait_for(self.receive_task, None)
+                await asyncio.wait_for(self.receive_task, None)
             except asyncio.CancelledError:
                 pass
         finally:
@@ -90,11 +88,10 @@ class Subscriber:
             del self.reader
             del self.writer
 
-    @asyncio.coroutine
-    def _receive_cr(self):
+    async def _receive_cr(self):
         targets = []
         while True:
-            line = yield from self.reader.readline()
+            line = await self.reader.readline()
             if not line:
                 return
             mod = pyon.decode(line.decode())
@@ -209,14 +206,13 @@ class Publisher(AsyncioServer):
         for notifier in notifiers.values():
             notifier.publish = partial(self.publish, notifier)
 
-    @asyncio.coroutine
-    def _handle_connection_cr(self, reader, writer):
+    async def _handle_connection_cr(self, reader, writer):
         try:
-            line = yield from reader.readline()
+            line = await reader.readline()
             if line != _init_string:
                 return
 
-            line = yield from reader.readline()
+            line = await reader.readline()
             if not line:
                 return
             notifier_name = line.decode()[:-1]
@@ -234,10 +230,10 @@ class Publisher(AsyncioServer):
             self._recipients[notifier_name].add(queue)
             try:
                 while True:
-                    line = yield from queue.get()
+                    line = await queue.get()
                     writer.write(line)
                     # raise exception on connection error
-                    yield from writer.drain()
+                    await writer.drain()
             finally:
                 self._recipients[notifier_name].remove(queue)
         except ConnectionResetError:


@@ -52,23 +52,22 @@ class RPCCase(unittest.TestCase):
     def test_blocking_echo(self):
         self._run_server_and_test(self._blocking_echo)
 
-    @asyncio.coroutine
-    def _asyncio_echo(self):
+    async def _asyncio_echo(self):
         remote = pc_rpc.AsyncioClient()
         for attempt in range(100):
-            yield from asyncio.sleep(.2)
+            await asyncio.sleep(.2)
             try:
-                yield from remote.connect_rpc(test_address, test_port, "test")
+                await remote.connect_rpc(test_address, test_port, "test")
            except ConnectionRefusedError:
                pass
            else:
                break
         try:
-            test_object_back = yield from remote.echo(test_object)
+            test_object_back = await remote.echo(test_object)
             self.assertEqual(test_object, test_object_back)
             with self.assertRaises(pc_rpc.RemoteError):
-                yield from remote.non_existing_method()
-            yield from remote.terminate()
+                await remote.non_existing_method()
+            await remote.terminate()
         finally:
             remote.close_rpc()


@@ -8,7 +8,6 @@ test_address = "::1"
 test_port = 7777
 
 
-@asyncio.coroutine
 def write_test_data(test_dict):
     test_values = [5, 2.1, None, True, False,
                    {"a": 5, 2: np.linspace(0, 10, 1)},
@@ -30,12 +29,11 @@ def write_test_data(test_dict):
     test_dict["finished"] = True
 
 
-@asyncio.coroutine
-def start_server(publisher_future, test_dict_future):
+async def start_server(publisher_future, test_dict_future):
     test_dict = sync_struct.Notifier(dict())
     publisher = sync_struct.Publisher(
         {"test": test_dict})
-    yield from publisher.start(test_address, test_port)
+    await publisher.start(test_address, test_port)
     publisher_future.set_result(publisher)
     test_dict_future.set_result(test_dict)
@@ -66,9 +64,9 @@ class SyncStructCase(unittest.TestCase):
         self.publisher = publisher.result()
         test_dict = test_dict.result()
         test_vector = dict()
-        loop.run_until_complete(write_test_data(test_vector))
-        asyncio.ensure_future(write_test_data(test_dict))
+        write_test_data(test_vector)
+        write_test_data(test_dict)
         self.subscriber = sync_struct.Subscriber("test", self.init_test_dict,
                                                  self.notify)
         loop.run_until_complete(self.subscriber.connect(test_address,


@@ -36,15 +36,14 @@ class WatchdogTimeoutInBuild(EnvExperiment):
         pass
 
 
-@asyncio.coroutine
-def _call_worker(worker, expid):
+async def _call_worker(worker, expid):
     try:
-        yield from worker.build(0, "main", None, expid, 0)
-        yield from worker.prepare()
-        yield from worker.run()
-        yield from worker.analyze()
+        await worker.build(0, "main", None, expid, 0)
+        await worker.prepare()
+        await worker.run()
+        await worker.analyze()
     finally:
-        yield from worker.close()
+        await worker.close()
 
 
 def _run_experiment(class_name):


@@ -79,27 +79,25 @@ def init_logger(args):
     logging.basicConfig(level=logging.WARNING + args.quiet*10 - args.verbose*10)
 
 
-@asyncio.coroutine
-def exc_to_warning(coro):
+async def exc_to_warning(coro):
     try:
-        yield from coro
+        await coro
     except:
         logger.warning("asyncio coroutine terminated with exception",
                        exc_info=True)
 
 
-@asyncio.coroutine
-def asyncio_wait_or_cancel(fs, **kwargs):
+async def asyncio_wait_or_cancel(fs, **kwargs):
     fs = [asyncio.ensure_future(f) for f in fs]
     try:
-        d, p = yield from asyncio.wait(fs, **kwargs)
+        d, p = await asyncio.wait(fs, **kwargs)
     except:
         for f in fs:
             f.cancel()
         raise
     for f in p:
         f.cancel()
-        yield from asyncio.wait([f])
+        await asyncio.wait([f])
     return fs
@@ -107,17 +105,15 @@ class TaskObject:
     def start(self):
         self.task = asyncio.ensure_future(self._do())
 
-    @asyncio.coroutine
-    def stop(self):
+    async def stop(self):
         self.task.cancel()
         try:
-            yield from asyncio.wait_for(self.task, None)
+            await asyncio.wait_for(self.task, None)
         except asyncio.CancelledError:
             pass
         del self.task
 
-    @asyncio.coroutine
-    def _do(self):
+    async def _do(self):
         raise NotImplementedError
@@ -129,13 +125,12 @@ class Condition:
         self._loop = asyncio.get_event_loop()
         self._waiters = collections.deque()
 
-    @asyncio.coroutine
-    def wait(self):
+    async def wait(self):
         """Wait until notified."""
         fut = asyncio.Future(loop=self._loop)
         self._waiters.append(fut)
         try:
-            yield from fut
+            await fut
         finally:
             self._waiters.remove(fut)