1
0

taskgroups: don't log CancelledError

This commit is contained in:
SomberNight
2020-02-27 20:22:49 +01:00
parent 15e91169c5
commit b21bcf5977
3 changed files with 18 additions and 6 deletions

View File

@@ -312,14 +312,17 @@ class Daemon(Logger):
async def _run(self, jobs: Iterable = None):
    """Run the daemon's main taskgroup until it is cancelled.

    Spawns each coroutine in *jobs*, plus a wait-forever task so the
    group stays open until cancelled from outside.

    :param jobs: optional iterable of coroutines to spawn into the group.
    """
    if jobs is None:
        jobs = []
    self.logger.info("starting taskgroup.")
    try:
        async with self.taskgroup as group:
            for job in jobs:
                await group.spawn(job)
            # run forever (until cancel)
            await group.spawn(asyncio.Event().wait)
    except asyncio.CancelledError:
        # deliberate shutdown — propagate without logging it as an error
        raise
    except Exception:
        self.logger.exception("taskgroup died.")
    finally:
        self.logger.info("taskgroup stopped.")
async def authenticate(self, headers):
if self.rpc_password == '':

View File

@@ -161,11 +161,16 @@ class LNWorker(Logger):
@ignore_exceptions  # don't kill outer taskgroup
async def main_loop(self):
    """Run this worker's taskgroup.

    Cancellation is re-raised so shutdown propagates; any other
    failure is logged (and swallowed by the decorator so the outer
    taskgroup survives).
    """
    self.logger.info("starting taskgroup.")
    try:
        async with self.taskgroup as group:
            await group.spawn(self._maintain_connectivity())
    except asyncio.CancelledError:
        raise
    except Exception:
        # unexpected death of the group — log with traceback
        self.logger.exception("taskgroup died.")
    finally:
        self.logger.info("taskgroup stopped.")
async def _maintain_connectivity(self):
while True:

View File

@@ -1131,6 +1131,7 @@ class Network(Logger):
self._start_interface(self.default_server)
async def main():
    """Entry coroutine for the network taskgroup (scheduled onto the
    asyncio loop by the enclosing method)."""
    self.logger.info("starting taskgroup.")
    try:
        await self._init_headers_file()
        # NOTE(review): the original comment here was truncated by the
        # diff ("if a task finishes with CancelledError, that ...") —
        # it appears to describe how CancelledError from a finished
        # task is treated by the group; confirm against upstream.
        async with taskgroup as group:
            await group.spawn(self._maintain_sessions())
            for job in self._jobs:
                await group.spawn(job)
    except asyncio.CancelledError:
        # cancellation is an orderly shutdown, not an error
        raise
    except Exception:
        self.logger.exception("taskgroup died.")
    finally:
        self.logger.info("taskgroup stopped.")
asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)
self.trigger_callback('network_updated')