generated from daniil-berg/boilerplate-py
Compare commits: v1.0.2 ... 1dff4152df (2 commits)

Author | SHA1 | Date
---|---|---
 | 1dff4152df |
 | a3b99048de |
.coveragerc

@@ -1,12 +1,14 @@
[run]
source = src/
branch = true
command_line = -m unittest discover
omit =
    .venv/*

[report]
fail_under = 100
show_missing = True
skip_covered = False
exclude_lines =
    if TYPE_CHECKING:
    if __name__ == ['"]__main__['"]:
omit =
    tests/*
88 .github/workflows/main.yaml (vendored)
@@ -1,88 +0,0 @@
name: CI
on:
  push:
    branches: [master]
jobs:
  tests:
    name: Python ${{ matrix.python-version }} Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - '3.8'
          - '3.9'
          - '3.10'
    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: 'requirements/dev.txt'

      - name: Upgrade packaging tools
        run: pip install -U pip

      - name: Install dependencies
        run: pip install -U -r requirements/dev.txt

      - name: Install asyncio-taskpool
        run: pip install -e .

      - name: Run tests for Python ${{ matrix.python-version }}
        if: ${{ matrix.python-version != '3.10' }}
        run: python -m tests

      - name: Run tests for Python 3.10 and save coverage
        if: ${{ matrix.python-version == '3.10' }}
        run: echo "coverage=$(./coverage.sh)" >> $GITHUB_ENV

    outputs:
      coverage: ${{ env.coverage }}

  update_badges:
    needs: tests
    name: Update Badges
    env:
      meta_gist_id: 3f8240a976e8781a765d9c74a583dcda
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Download `cloc`
        run: sudo apt-get update -y && sudo apt-get install -y cloc

      - name: Count lines of code/comments
        run: |
          echo "cloc_code=$(./cloc.sh -c src/)" >> $GITHUB_ENV
          echo "cloc_comments=$(./cloc.sh -m src/)" >> $GITHUB_ENV
          echo "cloc_commentpercent=$(./cloc.sh -p src/)" >> $GITHUB_ENV

      - name: Create badge for lines of code
        uses: Schneegans/dynamic-badges-action@v1.2.0
        with:
          auth: ${{ secrets.GIST_META_DATA }}
          gistID: ${{ env.meta_gist_id }}
          filename: cloc-code.json
          label: Lines of Code
          message: ${{ env.cloc_code }}

      - name: Create badge for lines of comments
        uses: Schneegans/dynamic-badges-action@v1.2.0
        with:
          auth: ${{ secrets.GIST_META_DATA }}
          gistID: ${{ env.meta_gist_id }}
          filename: cloc-comments.json
          label: Comments
          message: ${{ env.cloc_comments }} (${{ env.cloc_commentpercent }}%)

      - name: Create badge for test coverage
        uses: Schneegans/dynamic-badges-action@v1.2.0
        with:
          auth: ${{ secrets.GIST_META_DATA }}
          gistID: ${{ env.meta_gist_id }}
          filename: test-coverage.json
          label: Coverage
          message: ${{ needs.tests.outputs.coverage }}
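For context on the badge steps above: the gist files written by `Schneegans/dynamic-badges-action` (e.g. `cloc-code.json`) follow the shields.io *endpoint* JSON schema, which the README's `img.shields.io/endpoint?...url=` badge URLs then consume. A hedged sketch of what such a file plausibly contains (the message and color values here are illustrative placeholders, not taken from the repo):

```python
import json

# Illustrative shields.io endpoint payload; in the workflow above, the real
# message value comes from ${{ env.cloc_code }} and is written by the action.
badge = {
    "schemaVersion": 1,       # required by the shields.io endpoint schema
    "label": "Lines of Code",
    "message": "1234",        # placeholder value
    "color": "blue",
}
print(json.dumps(badge, indent=2))
```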
.readthedocs.yaml

@@ -1,11 +0,0 @@
version: 2
build:
  os: 'ubuntu-20.04'
  tools:
    python: '3.8'
python:
  install:
    - method: pip
      path: .
sphinx:
  fail_on_warning: true
47 README.md
@@ -1,30 +1,7 @@
[//]: # (This file is part of asyncio-taskpool.)

[//]: # (asyncio-taskpool is free software: you can redistribute it and/or modify it under the terms of)
[//]: # (version 3.0 of the GNU Lesser General Public License as published by the Free Software Foundation.)

[//]: # (asyncio-taskpool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;)
[//]: # (without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.)
[//]: # (See the GNU Lesser General Public License for more details.)

[//]: # (You should have received a copy of the GNU Lesser General Public License along with asyncio-taskpool.)
[//]: # (If not, see <https://www.gnu.org/licenses/>.)

# asyncio-taskpool

[![GitHub last commit][github-last-commit-img]][github-last-commit]
![Lines of code][gist-cloc-code-img]
![Lines of comments][gist-cloc-comments-img]
![Test coverage][gist-test-coverage-img]
[![License: LGPL v3.0][lgpl3-img]][lgpl3]
[![PyPI version][pypi-latest-version-img]][pypi-latest-version]

**Dynamically manage pools of asyncio tasks**

Full documentation available at [RtD](https://asyncio-taskpool.readthedocs.io/en/latest).

---

## Contents
- [Contents](#contents)
- [Summary](#summary)

@@ -50,16 +27,25 @@ Generally speaking, a task is added to a pool by providing it with a coroutine f

```python
from asyncio_taskpool import SimpleTaskPool

...


async def work(_foo, _bar): ...


...


async def main():
    pool = SimpleTaskPool(work, args=('xyz', 420))
    pool.start(5)
    await pool.start(5)
    ...
    pool.stop(3)
    ...
    pool.lock()
    await pool.gather_and_close()
    ...
```

Since one of the main goals of `asyncio-taskpool` is to be able to start/stop tasks dynamically or "on-the-fly", _most_ of the associated methods are non-blocking _most_ of the time. A notable exception is the `gather_and_close` method for awaiting the return of all tasks in the pool. (It is essentially a glorified wrapper around the [`asyncio.gather`](https://docs.python.org/3/library/asyncio-task.html#asyncio.gather) function.)
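The snippet above shows both call styles compared in this diff: the 1.0.2 form (`pool.start(5)`, non-blocking) and the older 1.0.0-beta form (`await pool.start(5)`). A self-contained, hedged sketch of the 1.0.2 usage, with a hypothetical worker coroutine standing in for `work`:

```python
import asyncio

from asyncio_taskpool import SimpleTaskPool


async def work(foo, bar):
    # hypothetical stand-in for the `work` coroutine function above
    while True:
        await asyncio.sleep(0.1)


async def main():
    pool = SimpleTaskPool(work, args=('xyz', 420))
    pool.start(5)                  # spawn five tasks; returns immediately in 1.0.2
    await asyncio.sleep(0.5)
    pool.stop(3)                   # cancel three of the five tasks
    pool.stop_all()                # cancel the remaining two
    pool.lock()                    # no new tasks may be started from here on
    await pool.gather_and_close()  # blocks until all tasks have returned


asyncio.run(main())
```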
@@ -78,7 +64,8 @@ Python Version 3.8+, tested on Linux

## Testing

Install [`coverage`](https://coverage.readthedocs.io/en/latest/) with `pip`, then execute the [`./coverage.sh`](coverage.sh) shell script to run all unit tests and save the coverage report.
Install `asyncio-taskpool[dev]` dependencies or just manually install [`coverage`](https://coverage.readthedocs.io/en/latest/) with `pip`.
Execute the [`./coverage.sh`](coverage.sh) shell script to run all unit tests and receive the coverage report.

## License

@@ -89,13 +76,3 @@ The full license texts for the [GNU GPLv3.0](COPYING) and the [GNU LGPLv3.0](COP

---

© 2022 Daniil Fajnberg

[github-last-commit]: https://github.com/daniil-berg/asyncio-taskpool/commits
[github-last-commit-img]: https://img.shields.io/github/last-commit/daniil-berg/asyncio-taskpool?label=Last%20commit&logo=git&
[gist-cloc-code-img]: https://img.shields.io/endpoint?logo=python&color=blue&url=https://gist.githubusercontent.com/daniil-berg/3f8240a976e8781a765d9c74a583dcda/raw/cloc-code.json
[gist-cloc-comments-img]: https://img.shields.io/endpoint?logo=sharp&color=lightgrey&url=https://gist.githubusercontent.com/daniil-berg/3f8240a976e8781a765d9c74a583dcda/raw/cloc-comments.json
[gist-test-coverage-img]: https://img.shields.io/endpoint?logo=pytest&color=blue&url=https://gist.githubusercontent.com/daniil-berg/3f8240a976e8781a765d9c74a583dcda/raw/test-coverage.json
[lgpl3]: https://www.gnu.org/licenses/lgpl-3.0
[lgpl3-img]: https://img.shields.io/badge/License-LGPL_v3.0-darkgreen.svg?logo=gnu
[pypi-latest-version-img]: https://img.shields.io/pypi/v/asyncio-taskpool?color=teal&logo=pypi
[pypi-latest-version]: https://pypi.org/project/asyncio-taskpool/
46 cloc.sh
@@ -1,46 +0,0 @@
#!/usr/bin/env bash

# This file is part of asyncio-taskpool.

# asyncio-taskpool is free software: you can redistribute it and/or modify it under the terms of
# version 3.0 of the GNU Lesser General Public License as published by the Free Software Foundation.

# asyncio-taskpool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License along with asyncio-taskpool.
# If not, see <https://www.gnu.org/licenses/>.

typeset option
if getopts 'bcmp' option; then
    if [[ ${option} == [bcmp] ]]; then
        shift
    else
        echo >&2 "Invalid option '$1' provided"
        exit 1
    fi
fi

typeset source=$1
if [[ -z ${source} ]]; then
    echo >&2 Source file/directory missing
    exit 1
fi

typeset blank code comment commentpercent
read blank comment code commentpercent < <( \
    cloc --csv --quiet --hide-rate --include-lang Python ${source} |
    awk -F, '$2 == "SUM" {printf ("%d %d %d %1.0f", $3, $4, $5, 100 * $4 / ($5 + $4)); exit}'
)

case ${option} in
    b) echo ${blank} ;;
    c) echo ${code} ;;
    m) echo ${comment} ;;
    p) echo ${commentpercent} ;;
    *) echo Blank lines: ${blank}
       echo Lines of comments: ${comment}
       echo Lines of code: ${code}
       echo Comment percentage: ${commentpercent} ;;
esac
26 coverage.sh
@@ -1,25 +1,3 @@
#!/usr/bin/env bash
#!/usr/bin/env sh

# This file is part of asyncio-taskpool.

# asyncio-taskpool is free software: you can redistribute it and/or modify it under the terms of
# version 3.0 of the GNU Lesser General Public License as published by the Free Software Foundation.

# asyncio-taskpool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License along with asyncio-taskpool.
# If not, see <https://www.gnu.org/licenses/>.

coverage erase
coverage run 2> /dev/null

typeset report=$(coverage report)
typeset total=$(echo "${report}" | awk '$1 == "TOTAL" {print $NF; exit}')

if [[ ${total} == 100% ]]; then
    echo ${total}
else
    echo "${report}"
fi
coverage erase && coverage run -m unittest discover && coverage report
7 docs/source/api/asyncio_taskpool.control.parser.rst (new file)
@@ -0,0 +1,7 @@
asyncio\_taskpool.control.parser module
=======================================

.. automodule:: asyncio_taskpool.control.parser
   :members:
   :undoc-members:
   :show-inheritance:
docs/source/api/asyncio_taskpool.control.rst

@@ -13,4 +13,6 @@ Submodules
   :maxdepth: 4

   asyncio_taskpool.control.client
   asyncio_taskpool.control.parser
   asyncio_taskpool.control.server
   asyncio_taskpool.control.session
7 docs/source/api/asyncio_taskpool.control.session.rst (new file)
@@ -0,0 +1,7 @@
asyncio\_taskpool.control.session module
========================================

.. automodule:: asyncio_taskpool.control.session
   :members:
   :undoc-members:
   :show-inheritance:
docs/source/conf.py

@@ -22,7 +22,7 @@ copyright = '2022 Daniil Fajnberg'
author = 'Daniil Fajnberg'

# The full version, including alpha/beta/rc tags
release = '1.0.2'
release = '1.0.0-beta'


# -- General configuration ---------------------------------------------------
docs/source/index.rst

@@ -45,7 +45,6 @@ Contents
   :maxdepth: 2

   pages/pool
   pages/ids
   pages/control
   api/api
docs/source/pages/ids.rst

@@ -1,42 +0,0 @@
.. This file is part of asyncio-taskpool.

.. asyncio-taskpool is free software: you can redistribute it and/or modify it under the terms of
   version 3.0 of the GNU Lesser General Public License as published by the Free Software Foundation.

.. asyncio-taskpool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
   without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU Lesser General Public License for more details.

.. You should have received a copy of the GNU Lesser General Public License along with asyncio-taskpool.
   If not, see <https://www.gnu.org/licenses/>.

.. Copyright © 2022 Daniil Fajnberg


IDs, groups & names
===================

Task IDs
--------

Every task spawned within a pool receives an ID, which is an integer greater or equal to 0 that is unique **within that task pool instance**. An internal counter is incremented whenever a new task is spawned. A task with ID :code:`n` was the :code:`(n+1)`-th task to be spawned in the pool. Task IDs can be used to cancel specific tasks using the :py:meth:`.cancel() <asyncio_taskpool.pool.BaseTaskPool.cancel>` method.

In practice, it should rarely be necessary to target *specific* tasks. When dealing with a regular :py:class:`TaskPool <asyncio_taskpool.pool.TaskPool>` instance, you would typically cancel entire task groups (see below) rather than individual tasks, whereas with :py:class:`SimpleTaskPool <asyncio_taskpool.pool.SimpleTaskPool>` instances you would indiscriminately cancel a number of tasks using the :py:meth:`.stop() <asyncio_taskpool.pool.SimpleTaskPool.stop>` method.

The ID of a pool task also appears in the task's name, which is set upon spawning it. (See `here <https://docs.python.org/3/library/asyncio-task.html#asyncio.Task.set_name>`_ for the associated method of the :code:`Task` class.)
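To illustrate the ID mechanics described above, here is a minimal, hedged sketch using the 1.0.2 API shown elsewhere in this diff (`job` is a hypothetical coroutine function):

```python
import asyncio

from asyncio_taskpool import TaskPool


async def job():
    await asyncio.sleep(1)


async def main():
    pool = TaskPool()
    pool.apply(job, num=3)     # the spawned tasks get IDs 0, 1 and 2
    await asyncio.sleep(0.1)   # give the pool a moment to spawn them
    pool.cancel(1)             # cancels only the second task that was spawned
    pool.lock()
    await pool.gather_and_close()


asyncio.run(main())
```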
Task groups
-----------

Every method of spawning new tasks in a task pool will add them to a **task group** and return the name of that group. With :py:class:`TaskPool <asyncio_taskpool.pool.TaskPool>` methods such as :py:meth:`.apply() <asyncio_taskpool.pool.TaskPool.apply>` and :py:meth:`.map() <asyncio_taskpool.pool.TaskPool.map>`, the group name can be set explicitly via the :code:`group_name` parameter. By default, the name will be a string containing some meta information depending on which method is used. Passing an existing task group name in any of those methods will result in a :py:class:`InvalidGroupName <asyncio_taskpool.exceptions.InvalidGroupName>` error.

You can cancel entire task groups using the :py:meth:`.cancel_group() <asyncio_taskpool.pool.BaseTaskPool.cancel_group>` method by passing it the group name. To check which tasks belong to a group, the :py:meth:`.get_group_ids() <asyncio_taskpool.pool.BaseTaskPool.get_group_ids>` method can be used, which takes group names and returns the IDs of the tasks belonging to them.

The :py:meth:`SimpleTaskPool.start() <asyncio_taskpool.pool.SimpleTaskPool.start>` method will create a new group as well, each time it is called, but it does not allow customizing the group name. Typically, it will not be necessary to keep track of groups in a :py:class:`SimpleTaskPool <asyncio_taskpool.pool.SimpleTaskPool>` instance.

Task groups do not impose limits on the number of tasks in them, although they can be indirectly constrained by pool size limits.
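A hedged sketch of these group mechanics under the 1.0.2 API (`fetch` is hypothetical, and the exact signature of `get_group_ids` is assumed here rather than quoted from the source):

```python
import asyncio

from asyncio_taskpool import TaskPool


async def fetch():
    await asyncio.sleep(1)


async def main():
    pool = TaskPool()
    group = pool.apply(fetch, num=4, group_name='fetchers')
    await asyncio.sleep(0.1)
    print(pool.get_group_ids(group))  # IDs of the four tasks, e.g. {0, 1, 2, 3}
    pool.cancel_group(group)          # cancels all of them at once
    pool.lock()
    await pool.gather_and_close()


asyncio.run(main())
```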
Pool names
----------

When initializing a task pool, you can provide a custom name for it, which will appear in its string representation, e.g. when using it in a :code:`print()`. A class attribute keeps track of initialized task pools and assigns each one an index (similar to IDs for pool tasks). If no name is specified when creating a new pool, its index is used in the string representation of it. Pool names can be helpful when using multiple pools and analyzing log messages.
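For example (a minimal sketch; the exact format of the string representation is not guaranteed by the text above):

```python
import asyncio

from asyncio_taskpool import TaskPool


async def main():
    pool = TaskPool(name='downloads')
    print(pool)  # the custom name 'downloads' appears here and in log messages
    pool.lock()
    await pool.gather_and_close()


asyncio.run(main())
```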
docs/source/pages/pool.rst

@@ -81,13 +81,13 @@ By contrast, here is how you would do it with a task pool:

   ...
   pool = TaskPool()
   group_name = pool.apply(queue_worker_function, args=(q_in, q_out), num=5)
   group_name = await pool.apply(queue_worker_function, args=(q_in, q_out), num=5)
   ...
   pool.cancel_group(group_name)
   ...
   await pool.flush()

Pretty much self-explanatory, no? (See :doc:`here <./ids>` for more information about groups/names).
Pretty much self-explanatory, no?

Let's consider a slightly more involved example. Assume you have a coroutine function that takes just one argument (some data) as input, does some work with it (maybe connects to the internet in the process), and eventually writes its results to a database (which is globally defined). Here is how that might look:
@@ -141,17 +141,16 @@ Or we could use a task pool:

   async def main():
       ...
       pool = TaskPool()
       pool.map(another_worker_function, data_iterator, num_concurrent=5)
       await pool.map(another_worker_function, data_iterator, num_concurrent=5)
       ...
       pool.lock()
       await pool.gather_and_close()

Calling the :py:meth:`.map() <asyncio_taskpool.pool.TaskPool.map>` method this way ensures that there will **always** -- i.e. at any given moment in time -- be exactly 5 tasks working concurrently on our data (assuming no other pool interaction).
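To make that `num_concurrent` guarantee concrete, here is a self-contained, hedged sketch using the 1.0.2 call style from the first variant above (the worker body is a hypothetical stand-in):

```python
import asyncio

from asyncio_taskpool import TaskPool


async def another_worker_function(item):
    # hypothetical stand-in for the worker described above
    await asyncio.sleep(0.1)


async def main():
    data_iterator = iter(range(100))
    pool = TaskPool()
    # At most 5 tasks work on the 100 items at any given moment in time.
    pool.map(another_worker_function, data_iterator, num_concurrent=5)
    pool.lock()
    await pool.gather_and_close()  # returns once every item has been consumed


asyncio.run(main())
```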
The :py:meth:`.gather_and_close() <asyncio_taskpool.pool.BaseTaskPool.gather_and_close>` line will block until **all the data** has been consumed. (see :ref:`blocking-pool-methods`)

.. note::

   Neither :py:meth:`.apply() <asyncio_taskpool.pool.TaskPool.apply>` nor :py:meth:`.map() <asyncio_taskpool.pool.TaskPool.map>` return coroutines. When they are called, the task pool immediately begins scheduling new tasks to run. No :code:`await` needed.
   The :py:meth:`.gather_and_close() <asyncio_taskpool.pool.BaseTaskPool.gather_and_close>` line will block until **all the data** has been consumed. (see :ref:`blocking-pool-methods`)

It can't get any simpler than that, can it? So glad you asked...
@@ -164,13 +163,13 @@ Let's take the :ref:`queue worker example <queue-worker-function>` from before.
   :caption: main.py

   from asyncio_taskpool import SimpleTaskPool
   from .work import queue_worker_function
   from .work import another_worker_function


   async def main():
       ...
       pool = SimpleTaskPool(queue_worker_function, args=(q_in, q_out))
       pool.start(5)
       await pool.start(5)
       ...
       pool.stop_all()
       ...
@@ -230,4 +229,8 @@ The only method of a pool that one should **always** assume to be blocking is :p

One method to be aware of is :py:meth:`.flush() <asyncio_taskpool.pool.BaseTaskPool.flush>`. Since it will await only those tasks that the pool considers **ended** or **cancelled**, the blocking can only come from any callbacks that were provided for either of those situations.

All methods that add tasks to a pool, i.e. :py:meth:`TaskPool.map() <asyncio_taskpool.pool.TaskPool.map>` (and its variants), :py:meth:`TaskPool.apply() <asyncio_taskpool.pool.TaskPool.apply>` and :py:meth:`SimpleTaskPool.start() <asyncio_taskpool.pool.SimpleTaskPool.start>`, are non-blocking by design. They all make use of "meta tasks" under the hood and return immediately. It is important however, to realize that just because they return, does not mean that any actual tasks have been spawned. For example, if a pool size limit was set and there was "no more room" in the pool when :py:meth:`.map() <asyncio_taskpool.pool.TaskPool.map>` was called, there is **no guarantee** that even a single task has started, when it returns.
In general, the act of adding tasks to a pool is non-blocking, no matter which particular methods are used. The only notable exception is when a limit on the pool size has been set and there is "not enough room" to add a task. In this case, both :py:meth:`SimpleTaskPool.start() <asyncio_taskpool.pool.SimpleTaskPool.start>` and :py:meth:`TaskPool.apply() <asyncio_taskpool.pool.TaskPool.apply>` will block until the desired number of new tasks found room in the pool (either because other tasks have ended or because the pool size was increased).

:py:meth:`TaskPool.map() <asyncio_taskpool.pool.TaskPool.map>` (and its variants) will **never** block. Since it makes use of a "meta-task" under the hood, it will always return immediately. However, if the pool was full when it was called, there is **no guarantee** that even a single task has started, when the method returns.
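A hedged sketch illustrating the 1.0.2 semantics described in the first paragraph (non-blocking adds with no guarantee of immediate spawning); `slow` is a hypothetical worker:

```python
import asyncio

from asyncio_taskpool import TaskPool


async def slow(n):
    await asyncio.sleep(n)


async def main():
    pool = TaskPool(pool_size=2)                     # room for only two tasks at a time
    pool.map(slow, [1, 1, 1, 1], num_concurrent=4)   # returns immediately anyway...
    # ...but at this point there is no guarantee that even one task has started;
    # the meta task spawns the remaining ones as room frees up in the pool.
    pool.lock()
    await pool.gather_and_close()


asyncio.run(main())
```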
setup.cfg

@@ -1,6 +1,6 @@
[metadata]
name = asyncio-taskpool
version = 1.0.2
version = 1.0.0-beta
author = Daniil Fajnberg
author_email = mail@daniil.fajnberg.de
description = Dynamically manage pools of asyncio tasks

@@ -11,7 +11,7 @@ url = https://git.fajnberg.de/daniil/asyncio-taskpool
project_urls =
    Bug Tracker = https://github.com/daniil-berg/asyncio-taskpool/issues
classifiers =
    Development Status :: 5 - Production/Stable
    Development Status :: 4 - Beta
    Programming Language :: Python :: 3
    Operating System :: OS Independent
    License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)
src/asyncio_taskpool/control/client.py

@@ -97,22 +97,21 @@ class ControlClient(ABC):
            writer: The `asyncio.StreamWriter` returned by the `_open_connection()` method

        Returns:
            `None`, if either `Ctrl+C` was hit, an empty or whitespace-only string was entered, or the user wants the
            client to disconnect; otherwise, returns the user's input, stripped of leading and trailing spaces and
            converted to lowercase.
            `None`, if either `Ctrl+C` was hit, or the user wants the client to disconnect;
            otherwise, the user's input, stripped of leading and trailing spaces and converted to lowercase.
        """
        try:
            cmd = input("> ").strip().lower()
            msg = input("> ").strip().lower()
        except EOFError:  # Ctrl+D shall be equivalent to the :const:`CLIENT_EXIT` command.
            cmd = CLIENT_EXIT
            msg = CLIENT_EXIT
        except KeyboardInterrupt:  # Ctrl+C shall simply reset to the input prompt.
            print()
            return
        if cmd == CLIENT_EXIT:
        if msg == CLIENT_EXIT:
            writer.close()
            self._connected = False
            return
        return cmd or None  # will be None if `cmd` is an empty string
        return msg

    async def _interact(self, reader: StreamReader, writer: StreamWriter) -> None:
        """
src/asyncio_taskpool/control/parser.py

@@ -17,21 +17,19 @@ If not, see <https://www.gnu.org/licenses/>."""

__doc__ = """
Definition of the :class:`ControlParser` used in a
:class:`ControlSession <asyncio_taskpool.control.session.ControlSession>`.

It should not be considered part of the public API.
"""


import logging
from argparse import Action, ArgumentParser, ArgumentDefaultsHelpFormatter, HelpFormatter, ArgumentTypeError, SUPPRESS
from ast import literal_eval
from asyncio.streams import StreamWriter
from inspect import Parameter, getmembers, isfunction, signature
from io import StringIO
from shutil import get_terminal_size
from typing import Any, Callable, Container, Dict, Iterable, Set, Type, TypeVar

from ..exceptions import HelpRequested, ParserError
from ..internals.constants import CLIENT_INFO, CMD
from ..internals.constants import CLIENT_INFO, CMD, STREAM_WRITER
from ..internals.helpers import get_first_doc_line, resolve_dotted_path
from ..internals.types import ArgsT, CancelCB, CoroutineFunc, EndCB, KwArgsT

@@ -54,8 +52,8 @@ class ControlParser(ArgumentParser):
    """
    Subclass of the standard :code:`argparse.ArgumentParser` for pool control.

    Such a parser is not supposed to ever print to stdout/stderr, but instead direct all messages to a file-like
    `StringIO` instance passed to it during initialization.
    Such a parser is not supposed to ever print to stdout/stderr, but instead direct all messages to a `StreamWriter`
    instance passed to it during initialization.
    Furthermore, it requires defining the width of the terminal, to adjust help formatting to the terminal size of a
    connected client.
    Finally, it offers some convenience methods and makes use of custom exceptions.

@@ -89,23 +87,25 @@ class ControlParser(ArgumentParser):
                super().__init__(*args, **kwargs)
        return ClientHelpFormatter

    def __init__(self, stream: StringIO, terminal_width: int = None, **kwargs) -> None:
    def __init__(self, stream_writer: StreamWriter, terminal_width: int = None, **kwargs) -> None:
        """
        Sets some internal attributes in addition to the base class.

        Args:
            stream:
                A file-like I/O object to use for message output.
            stream_writer:
                The instance of the :class:`asyncio.StreamWriter` to use for message output.
            terminal_width (optional):
                The terminal width to use for all message formatting. By default the :code:`columns` attribute from
                :func:`shutil.get_terminal_size` is taken.
            **kwargs(optional):
                Passed to the parent class constructor. The exception is the `formatter_class` parameter: Even if a
                class is specified, it will always be subclassed in the :meth:`help_formatter_factory`.
                Also, by default, `exit_on_error` is set to `False` (as opposed to how the parent class handles it).
        """
        self._stream: StringIO = stream
        self._stream_writer: StreamWriter = stream_writer
        self._terminal_width: int = terminal_width if terminal_width is not None else get_terminal_size().columns
        kwargs['formatter_class'] = self.help_formatter_factory(self._terminal_width, kwargs.get('formatter_class'))
        kwargs.setdefault('exit_on_error', False)
        super().__init__(**kwargs)
        self._flags: Set[str] = set()
        self._commands = None

@@ -194,7 +194,7 @@ class ControlParser(ArgumentParser):
            Dictionary mapping class member names to the (sub-)parsers created from them.
        """
        parsers: ParsersDict = {}
        common_kwargs = {'stream': self._stream, CLIENT_INFO.TERMINAL_WIDTH: self._terminal_width}
        common_kwargs = {STREAM_WRITER: self._stream_writer, CLIENT_INFO.TERMINAL_WIDTH: self._terminal_width}
        for name, member in getmembers(cls):
            if name in omit_members or (name.startswith('_') and public_only):
                continue

@@ -214,9 +214,9 @@ class ControlParser(ArgumentParser):
        return self._commands

    def _print_message(self, message: str, *args, **kwargs) -> None:
        """This is overridden to ensure that no messages are sent to stdout/stderr, but always to the stream buffer."""
        """This is overridden to ensure that no messages are sent to stdout/stderr, but always to the stream writer."""
        if message:
            self._stream.write(message)
            self._stream_writer.write(message.encode())

    def exit(self, status: int = 0, message: str = None) -> None:
        """This is overridden to prevent system exit to be invoked."""
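The change above only swaps the parser's output sink from a `StringIO` buffer to the session's `StreamWriter`; the underlying argparse pattern is identical in both versions. A generic, hedged sketch of that pattern (this is not the library's actual class):

```python
from argparse import ArgumentParser
from io import StringIO


class BufferedParser(ArgumentParser):
    """Never prints to stdout/stderr and never raises SystemExit."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.buffer = StringIO()

    def _print_message(self, message, file=None):
        if message:
            self.buffer.write(message)  # redirect all argparse output

    def exit(self, status=0, message=None):
        if message:
            self._print_message(message)  # swallow the exit instead of raising


parser = BufferedParser(prog='demo')
parser.add_argument('--num', type=int, default=1)
parser.parse_args(['--help'])     # would normally print help and call sys.exit()
print(parser.buffer.getvalue())   # the help text was captured instead
```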
src/asyncio_taskpool/control/server.py

@@ -31,7 +31,6 @@ from typing import Optional, Union
from .client import ControlClient, TCPControlClient, UnixControlClient
from .session import ControlSession
from ..pool import AnyTaskPoolT
from ..internals.helpers import classmethod
from ..internals.types import ConnectedCallbackT, PathT
src/asyncio_taskpool/control/session.py

@@ -16,8 +16,6 @@ If not, see <https://www.gnu.org/licenses/>."""

__doc__ = """
Definition of the :class:`ControlSession` used by a :class:`ControlServer`.

It should not be considered part of the public API.
"""


@@ -26,13 +24,12 @@ import json
from argparse import ArgumentError
from asyncio.streams import StreamReader, StreamWriter
from inspect import isfunction, signature
from io import StringIO
from typing import Callable, Optional, Union, TYPE_CHECKING

from .parser import ControlParser
from ..exceptions import CommandError, HelpRequested, ParserError
from ..pool import TaskPool, SimpleTaskPool
from ..internals.constants import CLIENT_INFO, CMD, CMD_OK, SESSION_MSG_BYTES
from ..internals.constants import CLIENT_INFO, CMD, CMD_OK, SESSION_MSG_BYTES, STREAM_WRITER
from ..internals.helpers import return_or_exception

if TYPE_CHECKING:

@@ -75,7 +72,6 @@ class ControlSession:
        self._reader: StreamReader = reader
        self._writer: StreamWriter = writer
        self._parser: Optional[ControlParser] = None
        self._response_buffer: StringIO = StringIO()

    async def _exec_method_and_respond(self, method: Callable, **kwargs) -> None:
        """

@@ -137,7 +133,7 @@ class ControlSession:
        client_info = json.loads((await self._reader.read(SESSION_MSG_BYTES)).decode().strip())
        log.debug("%s connected", self._client_class_name)
        parser_kwargs = {
            'stream': self._response_buffer,
            STREAM_WRITER: self._writer,
            CLIENT_INFO.TERMINAL_WIDTH: client_info[CLIENT_INFO.TERMINAL_WIDTH],
            'prog': '',
            'usage': f'[-h] [{CMD}] ...'

@@ -164,7 +160,7 @@ class ControlSession:
            kwargs = vars(self._parser.parse_args(msg.split(' ')))
        except ArgumentError as e:
            log.debug("%s got an ArgumentError", self._client_class_name)
            self._response_buffer.write(str(e))
            self._writer.write(str(e).encode())
            return
        except (HelpRequested, ParserError):
            log.debug("%s received usage help", self._client_class_name)

@@ -175,7 +171,7 @@ class ControlSession:
        elif isinstance(command, property):
            await self._exec_property_and_respond(command, **kwargs)
        else:
            self._response_buffer.write(str(CommandError(f"Unknown command object: {command}")))
            self._writer.write(str(CommandError(f"Unknown command object: {command}")).encode())

    async def listen(self) -> None:
        """

@@ -192,8 +188,4 @@ class ControlSession:
                log.debug("%s disconnected", self._client_class_name)
                break
            await self._parse_command(msg)
            response = self._response_buffer.getvalue()
            self._response_buffer.seek(0)
            self._response_buffer.truncate()
            self._writer.write(response.encode())
            await self._writer.drain()
src/asyncio_taskpool/exceptions.py

@@ -51,6 +51,10 @@ class InvalidGroupName(PoolException):
    pass


class PoolStillUnlocked(PoolException):
    pass


class NotCoroutine(PoolException):
    pass
src/asyncio_taskpool/internals/constants.py

@@ -21,17 +21,15 @@ This module should **not** be considered part of the public API.
"""


import sys


PACKAGE_NAME = 'asyncio_taskpool'

PYTHON_BEFORE_39 = sys.version_info[:2] < (3, 9)

DEFAULT_TASK_GROUP = 'default'

DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'

SESSION_MSG_BYTES = 1024 * 100

STREAM_WRITER = 'stream_writer'
CMD = 'command'
CMD_OK = b"ok"
src/asyncio_taskpool/internals/helpers.py

@@ -19,13 +19,11 @@ Miscellaneous helper functions. None of these should be considered part of the p
"""


import builtins
from asyncio.coroutines import iscoroutinefunction
from importlib import import_module
from inspect import getdoc
from typing import Any, Callable, Optional, Type, Union
from typing import Any, Optional, Union

from .constants import PYTHON_BEFORE_39
from .types import T, AnyCallableT, ArgsT, KwArgsT


@@ -133,25 +131,3 @@ def resolve_dotted_path(dotted_path: str) -> object:
        import_module(module_name)
        found = getattr(found, name)
    return found


class ClassMethodWorkaround:
    """Dirty workaround to make the `@classmethod` decorator work with properties."""

    def __init__(self, method_or_property: Union[Callable, property]) -> None:
        if isinstance(method_or_property, property):
            self._getter = method_or_property.fget
        else:
            self._getter = method_or_property

    def __get__(self, obj: Union[T, None], cls: Union[Type[T], None]) -> Any:
        if obj is None:
            return self._getter(cls)
        return self._getter(obj)


# Starting with Python 3.9, this is thankfully no longer necessary.
if PYTHON_BEFORE_39:
    classmethod = ClassMethodWorkaround
else:
    classmethod = builtins.classmethod
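A hedged sketch of what the removed descriptor enabled on Python < 3.9, namely stacking `@classmethod` on `@property` (the `Config` class is hypothetical; the descriptor is copied verbatim from the hunk above so the snippet is self-contained):

```python
from typing import Any, Callable, Type, TypeVar, Union

T = TypeVar('T')


class ClassMethodWorkaround:
    """Copy of the descriptor removed above."""

    def __init__(self, method_or_property: Union[Callable, property]) -> None:
        if isinstance(method_or_property, property):
            self._getter = method_or_property.fget
        else:
            self._getter = method_or_property

    def __get__(self, obj: Union[T, None], cls: Union[Type[T], None]) -> Any:
        if obj is None:
            return self._getter(cls)  # accessed on the class itself
        return self._getter(obj)      # accessed on an instance


class Config:
    _instances = 0

    @ClassMethodWorkaround  # acts like `@classmethod` stacked on `@property`
    @property
    def instance_count(cls):
        return cls._instances


print(Config.instance_count)  # -> 0, accessed on the class without a call
```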
src/asyncio_taskpool/internals/types.py

@@ -23,7 +23,7 @@ This module should **not** be considered part of the public API.

from asyncio.streams import StreamReader, StreamWriter
from pathlib import Path
from typing import Any, Awaitable, Callable, Coroutine, Iterable, Mapping, Tuple, TypeVar, Union
from typing import Any, Awaitable, Callable, Iterable, Mapping, Tuple, TypeVar, Union


T = TypeVar('T')

@@ -31,8 +31,8 @@ T = TypeVar('T')
ArgsT = Iterable[Any]
KwArgsT = Mapping[str, Any]

AnyCallableT = Callable[..., Union[T, Awaitable[T]]]
CoroutineFunc = Callable[..., Coroutine]
AnyCallableT = Callable[[...], Union[T, Awaitable[T]]]
CoroutineFunc = Callable[[...], Awaitable[Any]]

EndCB = Callable
CancelCB = Callable
src/asyncio_taskpool/pool.py

@@ -28,17 +28,17 @@ For further details about the classes check their respective documentation.


import logging
import warnings
from asyncio.coroutines import iscoroutine, iscoroutinefunction
from asyncio.exceptions import CancelledError
from asyncio.locks import Semaphore
from asyncio.tasks import Task, create_task, gather
from contextlib import suppress
from datetime import datetime
from math import inf
from typing import Any, Awaitable, Dict, Iterable, List, Set, Union

from . import exceptions
from .internals.constants import DEFAULT_TASK_GROUP, PYTHON_BEFORE_39
from .internals.constants import DEFAULT_TASK_GROUP, DATETIME_FORMAT
from .internals.group_register import TaskGroupRegister
from .internals.helpers import execute_optional, star_function
from .internals.types import ArgsT, KwArgsT, CoroutineFunc, EndCB, CancelCB

@@ -84,10 +84,6 @@ class BaseTaskPool:
        self._enough_room: Semaphore = Semaphore()
        self._task_groups: Dict[str, TaskGroupRegister[int]] = {}

        # Mapping task group names to sets of meta tasks, and a bucket for cancelled meta tasks.
        self._group_meta_tasks_running: Dict[str, Set[Task]] = {}
        self._meta_tasks_cancelled: Set[Task] = set()

        # Finish with method/functions calls that add the pool to the internal list of pools, set its initial size,
        # and issue a log message.
        self._idx: int = self._add_pool(self)

@@ -327,8 +323,6 @@ class BaseTaskPool:
        """
        self._check_start(awaitable=awaitable, ignore_lock=ignore_lock)
        await self._enough_room.acquire()
        # TODO: Make sure that cancellation (group or pool) interrupts this method after context switching!
        # Possibly make use of the task group register for that.
        group_reg = self._task_groups.setdefault(group_name, TaskGroupRegister())
        async with group_reg:
            task_id = self._num_started

@@ -361,23 +355,6 @@
            raise exceptions.AlreadyEnded(f"{self._task_name(task_id)} has finished running")
        raise exceptions.InvalidTaskID(f"No task with ID {task_id} found in {self}")

    @staticmethod
    def _get_cancel_kw(msg: Union[str, None]) -> Dict[str, str]:
        """
        Returns a dictionary to unpack in a `Task.cancel()` method.

        This method exists to ensure proper compatibility with older Python versions.
        If `msg` is `None`, an empty dictionary is returned.
        If `PYTHON_BEFORE_39` is `True` a warning is issued before returning an empty dictionary.
        Otherwise the keyword dictionary contains the `msg` parameter.
        """
        if msg is None:
            return {}
        if PYTHON_BEFORE_39:
            warnings.warn("Parameter `msg` is not available with Python versions before 3.9 and will be ignored.")
            return {}
        return {'msg': msg}
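The removed `_get_cancel_kw` helper encapsulates a small version-compat idiom: only pass `msg` to `Task.cancel()` on Python 3.9+, where that parameter exists. A generic, hedged sketch of the same idea outside the class:

```python
import sys
import warnings


def cancel_kwargs(msg):
    """Build the kwargs for Task.cancel(); `msg` requires Python 3.9+."""
    if msg is None:
        return {}
    if sys.version_info[:2] < (3, 9):
        warnings.warn("Parameter `msg` is not available before Python 3.9 and will be ignored.")
        return {}
    return {'msg': msg}

# usage: task.cancel(**cancel_kwargs("shutting down"))
```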
    def cancel(self, *task_ids: int, msg: str = None) -> None:
        """
        Cancels the tasks with the specified IDs.

@@ -396,9 +373,128 @@ class BaseTaskPool:
            `InvalidTaskID`: One of the `task_ids` is not known to the pool.
        """
        tasks = [self._get_running_task(task_id) for task_id in task_ids]
        kw = self._get_cancel_kw(msg)
        for task in tasks:
            task.cancel(**kw)
            task.cancel(msg=msg)

    def _cancel_and_remove_all_from_group(self, group_name: str, group_reg: TaskGroupRegister, msg: str = None) -> None:
        """
        Removes all tasks from the specified group and cancels them.

        Does nothing to tasks, that are no longer running.

        Args:
            group_name: The name of the group of tasks that shall be cancelled.
            group_reg: The task group register object containing the task IDs.
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.
        """
        while group_reg:
            try:
                self._tasks_running[group_reg.pop()].cancel(msg=msg)
            except KeyError:
                continue
        log.debug("%s cancelled tasks from group %s", str(self), group_name)

    async def cancel_group(self, group_name: str, msg: str = None) -> None:
        """
        Cancels an entire group of tasks.

        The task group is subsequently forgotten by the pool.

        Args:
            group_name: The name of the group of tasks that shall be cancelled.
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.

        Raises:
            `InvalidGroupName`: if no task group named `group_name` exists in the pool.
        """
        log.debug("%s cancelling tasks in group %s", str(self), group_name)
        try:
            group_reg = self._task_groups.pop(group_name)
        except KeyError:
            raise exceptions.InvalidGroupName(f"No task group named {group_name} exists in this pool.")
        async with group_reg:
            self._cancel_and_remove_all_from_group(group_name, group_reg, msg=msg)
        log.debug("%s forgot task group %s", str(self), group_name)

    async def cancel_all(self, msg: str = None) -> None:
        """
        Cancels all tasks still running within the pool.

        Args:
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.
        """
        log.warning("%s cancelling all tasks!", str(self))
        while self._task_groups:
            group_name, group_reg = self._task_groups.popitem()
            async with group_reg:
                self._cancel_and_remove_all_from_group(group_name, group_reg, msg=msg)

    async def flush(self, return_exceptions: bool = False):
        """
        Gathers (i.e. awaits) all ended/cancelled tasks in the pool.

        The tasks are subsequently forgotten by the pool. This method exists mainly to free up memory of unneeded
        `Task` objects.

        It blocks, **only if** any of the tasks block while catching a `asyncio.CancelledError` or any of the callbacks
        registered for the tasks block.

        Args:
            return_exceptions (optional): Passed directly into `gather`.
        """
        await gather(*self._tasks_ended.values(), *self._tasks_cancelled.values(), return_exceptions=return_exceptions)
        self._tasks_ended.clear()
        self._tasks_cancelled.clear()
    async def gather_and_close(self, return_exceptions: bool = False):
        """
        Gathers (i.e. awaits) **all** tasks in the pool, then closes it.

        After this method returns, no more tasks can be started in the pool.

        :meth:`lock` must have been called prior to this.

        This method may block, if one of the tasks blocks while catching a `asyncio.CancelledError` or if any of the
        callbacks registered for a task blocks for whatever reason.

        Args:
            return_exceptions (optional): Passed directly into `gather`.

        Raises:
            `PoolStillUnlocked`: The pool has not been locked yet.
        """
        if not self._locked:
            raise exceptions.PoolStillUnlocked("Pool must be locked, before tasks can be gathered")
        await gather(*self._tasks_ended.values(), *self._tasks_cancelled.values(), *self._tasks_running.values(),
                     return_exceptions=return_exceptions)
        self._tasks_ended.clear()
        self._tasks_cancelled.clear()
        self._tasks_running.clear()
        self._closed = True


class TaskPool(BaseTaskPool):
    """
    General purpose task pool class.

    Attempts to emulate part of the interface of `multiprocessing.pool.Pool` from the stdlib.

    A `TaskPool` instance can manage an arbitrary number of concurrent tasks from any coroutine function.
    Tasks in the pool can all belong to the same coroutine function,
    but they can also come from any number of different and unrelated coroutine functions.

    As long as there is room in the pool, more tasks can be added. (By default, there is no pool size limit.)
    Each task started in the pool receives a unique ID, which can be used to cancel specific tasks at any moment.

    Adding tasks blocks **only if** the pool is full at that moment.
    """

    def __init__(self, pool_size: int = inf, name: str = None) -> None:
        super().__init__(pool_size=pool_size, name=name)
        # In addition to all the attributes of the base class, we need a dictionary mapping task group names to sets of
        # meta tasks that are/were running in the context of that group, and a bucket for cancelled meta tasks.
        self._group_meta_tasks_running: Dict[str, Set[Task]] = {}
        self._meta_tasks_cancelled: Set[Task] = set()

    def _cancel_group_meta_tasks(self, group_name: str) -> None:
        """Cancels and forgets all meta tasks associated with the task group named `group_name`."""
@@ -411,67 +507,44 @@ class BaseTaskPool:
            self._meta_tasks_cancelled.update(meta_tasks)
        log.debug("%s cancelled and forgot meta tasks from group %s", str(self), group_name)

    def _cancel_and_remove_all_from_group(self, group_name: str, group_reg: TaskGroupRegister, **cancel_kw) -> None:
        """
        Removes all tasks from the specified group and cancels them.

        Does nothing to tasks, that are no longer running.

        Args:
            group_name: The name of the group of tasks that shall be cancelled.
            group_reg: The task group register object containing the task IDs.
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.
        """
    def _cancel_and_remove_all_from_group(self, group_name: str, group_reg: TaskGroupRegister, msg: str = None) -> None:
        """See base class."""
        self._cancel_group_meta_tasks(group_name)
        while group_reg:
            try:
                self._tasks_running[group_reg.pop()].cancel(**cancel_kw)
            except KeyError:
                continue
        log.debug("%s cancelled tasks from group %s", str(self), group_name)
        super()._cancel_and_remove_all_from_group(group_name, group_reg, msg=msg)

    def cancel_group(self, group_name: str, msg: str = None) -> None:
    async def cancel_group(self, group_name: str, msg: str = None) -> None:
        """
        Cancels an entire group of tasks.

        The task group is subsequently forgotten by the pool.

        If any methods such launched meta tasks belonging to that group, these meta tasks are cancelled before the
        actual tasks are cancelled. This means that any tasks "queued" to be started by a meta task will
        **never even start**.
        If any methods such as :meth:`map` launched meta tasks belonging to that group, these meta tasks are cancelled
        before the actual tasks are cancelled. This means that any tasks "queued" to be started by a meta task will
        **never even start**. In the case of :meth:`map` this would mean that its `arg_iter` may be abandoned before it
        was fully consumed (if that is even possible).

        Args:
            group_name: The name of the group of tasks (and meta tasks) that shall be cancelled.
            msg (optional): Passed to the `Task.cancel()` method of every task in the group.
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.

        Raises:
            `InvalidGroupName`: No task group named `group_name` exists in the pool.
        """
        log.debug("%s cancelling tasks in group %s", str(self), group_name)
        try:
            group_reg = self._task_groups.pop(group_name)
        except KeyError:
            raise exceptions.InvalidGroupName(f"No task group named {group_name} exists in this pool.")
        kw = self._get_cancel_kw(msg)
        self._cancel_and_remove_all_from_group(group_name, group_reg, **kw)
        log.debug("%s forgot task group %s", str(self), group_name)
        await super().cancel_group(group_name=group_name, msg=msg)

    def cancel_all(self, msg: str = None) -> None:
    async def cancel_all(self, msg: str = None) -> None:
        """
        Cancels all tasks still running within the pool (including meta tasks).

        If any methods such launched meta tasks belonging to that group, these meta tasks are cancelled before the
        actual tasks are cancelled. This means that any tasks "queued" to be started by a meta task will
        **never even start**.
        If any methods such as :meth:`map` launched meta tasks, these meta tasks are cancelled before the actual tasks
        are cancelled. This means that any tasks "queued" to be started by a meta task will **never even start**. In the
        case of :meth:`map` this would mean that its `arg_iter` may be abandoned before it was fully consumed (if that
        is even possible).

        Args:
            msg (optional): Passed to the `Task.cancel()` method of every task.
            msg (optional): Passed to the `Task.cancel()` method of every task specified by the `task_ids`.
        """
        log.warning("%s cancelling all tasks!", str(self))
        kw = self._get_cancel_kw(msg)
        while self._task_groups:
            group_name, group_reg = self._task_groups.popitem()
            self._cancel_and_remove_all_from_group(group_name, group_reg, **kw)
        await super().cancel_all(msg=msg)
    def _pop_ended_meta_tasks(self) -> Set[Task]:
        """

@@ -504,7 +577,7 @@ class BaseTaskPool:
        Gathers (i.e. awaits) all ended/cancelled tasks in the pool.

        The tasks are subsequently forgotten by the pool. This method exists mainly to free up memory of unneeded
        `Task` objects. It also gets rid of unneeded (ended/cancelled) meta tasks.
        `Task` objects. It also gets rid of unneeded meta tasks.

        It blocks, **only if** any of the tasks block while catching a `asyncio.CancelledError` or any of the callbacks
        registered for the tasks block.

@@ -516,19 +589,19 @@ class BaseTaskPool:
        await gather(*self._meta_tasks_cancelled, *self._pop_ended_meta_tasks(),
                     return_exceptions=return_exceptions)
        self._meta_tasks_cancelled.clear()
        await gather(*self._tasks_ended.values(), *self._tasks_cancelled.values(), return_exceptions=return_exceptions)
        self._tasks_ended.clear()
        self._tasks_cancelled.clear()
        await super().flush(return_exceptions=return_exceptions)

    async def gather_and_close(self, return_exceptions: bool = False):
        """
        Gathers (i.e. awaits) **all** tasks in the pool, then closes it.

        Once this method is called, no more tasks can be started in the pool.
        After this method returns, no more tasks can be started in the pool.

        The `lock()` method must have been called prior to this.

        Note that this method may block indefinitely as long as any task in the pool is not done. This includes meta
        tasks launched by other methods, which may or may not even end by themselves. To avoid this, make sure to call
        :meth:`cancel_all` first.
        tasks launched by methods such as :meth:`map`, which end by themselves, only once the arguments iterator is
        fully consumed (which may not even be possible). To avoid this, make sure to call :meth:`cancel_all` first.

        This method may also block, if one of the tasks blocks while catching a `asyncio.CancelledError` or if any of
        the callbacks registered for a task blocks for whatever reason.

@@ -539,105 +612,59 @@ class BaseTaskPool:
        Raises:
            `PoolStillUnlocked`: The pool has not been locked yet.
        """
        self.lock()
        not_cancelled_meta_tasks = (task for task_set in self._group_meta_tasks_running.values() for task in task_set)
        with suppress(CancelledError):
            await gather(*self._meta_tasks_cancelled, *not_cancelled_meta_tasks, return_exceptions=return_exceptions)
        self._meta_tasks_cancelled.clear()
        self._group_meta_tasks_running.clear()
        await gather(*self._tasks_ended.values(), *self._tasks_cancelled.values(), *self._tasks_running.values(),
                     return_exceptions=return_exceptions)
        self._tasks_ended.clear()
        self._tasks_cancelled.clear()
        self._tasks_running.clear()
        self._closed = True
        # TODO: Turn the `_closed` attribute into an `Event` and add something like a `until_closed` method that will
        #       await it to allow blocking until a closing command comes from a server.
        await super().gather_and_close(return_exceptions=return_exceptions)
class TaskPool(BaseTaskPool):
    @staticmethod
    def _generate_group_name(prefix: str, coroutine_function: CoroutineFunc) -> str:
    """
    General purpose task pool class.

    Attempts to emulate part of the interface of `multiprocessing.pool.Pool` from the stdlib.

    A `TaskPool` instance can manage an arbitrary number of concurrent tasks from any coroutine function.
    Tasks in the pool can all belong to the same coroutine function,
    but they can also come from any number of different and unrelated coroutine functions.

    As long as there is room in the pool, more tasks can be added. (By default, there is no pool size limit.)
    Each task started in the pool receives a unique ID, which can be used to cancel specific tasks at any moment.

    Adding tasks blocks **only if** the pool is full at that moment.
    """

    def _generate_group_name(self, prefix: str, coroutine_function: CoroutineFunc) -> str:
        """
        Creates a unique task group identifier.
        Creates a task group identifier that includes the current datetime.

        Args:
            prefix: The start of the name; will be followed by an underscore.
            coroutine_function: The function representing the task group.

        Returns:
            The constructed '{prefix}-{name}-group-{idx}' string to name a task group.
            (With `name` being the name of the `coroutine_function` and `idx` being an incrementing index.)
            The constructed 'prefix_function_datetime' string to name a task group.
        """
        base_name = f'{prefix}-{coroutine_function.__name__}-group'
        i = 0
        while True:
            name = f'{base_name}-{i}'
            if name not in self._task_groups.keys():
                return name
            i += 1
        return f'{prefix}_{coroutine_function.__name__}_{datetime.now().strftime(DATETIME_FORMAT)}'
async def _apply_spawner(self, group_name: str, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None,
|
||||
async def _apply_num(self, group_name: str, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None,
|
||||
num: int = 1, end_callback: EndCB = None, cancel_callback: CancelCB = None) -> None:
|
||||
"""
|
||||
Creates coroutines with the supplied arguments and runs them as new tasks in the pool.
|
||||
Creates a coroutine with the supplied arguments and runs it as a new task in the pool.
|
||||
|
||||
This method blocks, **only if** the pool has not enough room to accommodate `num` new tasks.
|
||||
|
||||
Args:
|
||||
group_name:
|
||||
Name of the task group to add the new tasks to.
|
||||
Name of the task group to add the new task to.
|
||||
func:
|
||||
The coroutine function to be run in `num` tasks within the task pool.
|
||||
The coroutine function to be run as a task within the task pool.
|
||||
args (optional):
|
||||
The positional arguments to pass into each function call.
|
||||
The positional arguments to pass into the function call.
|
||||
kwargs (optional):
|
||||
The keyword-arguments to pass into each function call.
|
||||
The keyword-arguments to pass into the function call.
|
||||
num (optional):
|
||||
The number of tasks to spawn with the specified parameters.
|
||||
end_callback (optional):
|
||||
A callback to execute after each task has ended.
|
||||
A callback to execute after the task has ended.
|
||||
It is run with the task's ID as its only positional argument.
|
||||
cancel_callback (optional):
|
||||
A callback to execute after cancellation of each task.
|
||||
A callback to execute after cancellation of the task.
|
||||
It is run with the task's ID as its only positional argument.
|
||||
"""
|
||||
if kwargs is None:
|
||||
kwargs = {}
|
||||
for i in range(num):
|
||||
try:
|
||||
coroutine = func(*args, **kwargs)
|
||||
except Exception as e:
|
||||
# This means there was probably something wrong with the function arguments.
|
||||
log.exception("%s occurred in group '%s' while trying to create coroutine: %s(*%s, **%s)",
|
||||
str(e.__class__.__name__), group_name, func.__name__, repr(args), repr(kwargs))
|
||||
continue # TODO: Consider returning instead of continuing
|
||||
try:
|
||||
await self._start_task(coroutine, group_name=group_name, end_callback=end_callback,
|
||||
cancel_callback=cancel_callback)
|
||||
except CancelledError:
|
||||
# Either the task group or all tasks were cancelled, so this meta tasks is not supposed to spawn any
|
||||
# more tasks and can return immediately.
|
||||
log.debug("Cancelled group '%s' after %s out of %s tasks have been spawned", group_name, i, num)
|
||||
coroutine.close()
|
||||
return
|
||||
await gather(*(self._start_task(func(*args, **kwargs), group_name=group_name, end_callback=end_callback,
|
||||
cancel_callback=cancel_callback) for _ in range(num)))

def apply(self, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None, num: int = 1, group_name: str = None,
end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
async def apply(self, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None, num: int = 1,
group_name: str = None, end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
"""
Creates tasks with the supplied arguments to be run in the pool.

@@ -646,12 +673,7 @@ class TaskPool(BaseTaskPool):

All the new tasks are added to the same task group.

Because this method delegates the spawning of the tasks to a meta task, it **never blocks**. However, just
because this method returns immediately, this does not mean that any task was started or that any number of
tasks will start soon, as this is solely determined by the :attr:`BaseTaskPool.pool_size` and `num`.

If the entire task group is cancelled before `num` tasks have spawned, since the meta task is cancelled first,
the number of tasks spawned will end up being less than `num`.
This method blocks, **only if** the pool does not have enough room to accommodate `num` new tasks.

Args:
func:
@@ -661,11 +683,9 @@ class TaskPool(BaseTaskPool):
kwargs (optional):
The keyword-arguments to pass into each function call.
num (optional):
The number of tasks to spawn with the specified parameters. Defaults to 1.
The number of tasks to spawn with the specified parameters.
group_name (optional):
Name of the task group to add the new tasks to. By default, a unique name is constructed in the form
:code:`'apply-{name}-group-{idx}'` (with `name` being the name of the `func` and `idx` being an
incrementing index).
Name of the task group to add the new tasks to.
end_callback (optional):
A callback to execute after a task has ended.
It is run with the task's ID as its only positional argument.
@@ -674,28 +694,25 @@ class TaskPool(BaseTaskPool):
It is run with the task's ID as its only positional argument.

Returns:
The name of the newly created group (see the `group_name` parameter).
The name of the task group that the newly spawned tasks have been added to.

Raises:
`PoolIsClosed`: The pool is closed.
`NotCoroutine`: `func` is not a coroutine function.
`PoolIsLocked`: The pool is currently locked.
`InvalidGroupName`: A group named `group_name` exists in the pool.
"""
self._check_start(function=func)
if group_name is None:
group_name = self._generate_group_name('apply', func)
if group_name in self._task_groups.keys():
raise exceptions.InvalidGroupName(f"Group named {group_name} already exists!")
self._task_groups.setdefault(group_name, TaskGroupRegister())
meta_tasks = self._group_meta_tasks_running.setdefault(group_name, set())
meta_tasks.add(create_task(self._apply_spawner(group_name, func, args, kwargs, num,
end_callback=end_callback, cancel_callback=cancel_callback)))
group_reg = self._task_groups.setdefault(group_name, TaskGroupRegister())
async with group_reg:
task = create_task(self._apply_num(group_name, func, args, kwargs, num, end_callback, cancel_callback))
await task
return group_name
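A usage sketch of `apply` against the synchronous variant shown first in each pair above (under the async variant, the call would be awaited instead); the coroutine function and its arguments are illustrative:

```python
import asyncio
from asyncio_taskpool import TaskPool

async def fetch(host: str, port: int = 80) -> None:
    await asyncio.sleep(0.1)  # stand-in for real I/O

async def main() -> None:
    pool = TaskPool()
    # Five fetch('example.org', port=8080) tasks are spawned into one new group;
    # the group name comes back immediately, while a meta task does the spawning.
    group_name = pool.apply(fetch, args=('example.org',), kwargs={'port': 8080}, num=5)
    print(group_name)  # e.g. 'apply-fetch-group-0'
    pool.lock()
    await pool.gather_and_close()

asyncio.run(main())
```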

@staticmethod
def _get_map_end_callback(map_semaphore: Semaphore, actual_end_callback: EndCB) -> EndCB:
"""Returns a wrapped `end_callback` for each :meth:`_arg_consumer` task that releases the `map_semaphore`."""
"""Returns a wrapped `end_callback` for each :meth:`_queue_consumer` task that releases the `map_semaphore`."""
async def release_callback(task_id: int) -> None:
map_semaphore.release()
await execute_optional(actual_end_callback, args=(task_id,))
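The wrapper chains the user's end-callback behind a semaphore release, so every finished task frees one concurrency slot for the argument consumer. A generic, self-contained sketch of the same idea (the `execute_optional` helper is replaced here by a plain `None` check; `with_release` is an illustrative name):

```python
import asyncio
from typing import Awaitable, Callable, Optional

EndCallback = Callable[[int], Awaitable[None]]

def with_release(semaphore: asyncio.Semaphore, end_callback: Optional[EndCallback]) -> EndCallback:
    async def release_callback(task_id: int) -> None:
        semaphore.release()           # free one slot for the argument consumer
        if end_callback is not None:  # stands in for execute_optional()
            await end_callback(task_id)
    return release_callback
```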

@@ -729,33 +746,29 @@ class TaskPool(BaseTaskPool):
The callback that was specified to execute after cancellation of the task (and the next one).
It is run with the task's ID as its only positional argument.
"""
semaphore = Semaphore(num_concurrent)
release_cb = self._get_map_end_callback(semaphore, actual_end_callback=end_callback)
for i, next_arg in enumerate(arg_iter):
semaphore_acquired = False
try:
coroutine = star_function(func, next_arg, arg_stars=arg_stars)
except Exception as e:
# This means there was probably something wrong with the function arguments.
log.exception("%s occurred in group '%s' while trying to create coroutine: %s(%s%s)",
str(e.__class__.__name__), group_name, func.__name__, '*' * arg_stars, str(next_arg))
continue
try:
map_semaphore = Semaphore(num_concurrent)
release_cb = self._get_map_end_callback(map_semaphore, actual_end_callback=end_callback)
for next_arg in arg_iter:
# When the number of running tasks spawned by this method reaches the specified maximum,
# this next line will block until one of them ends and releases the semaphore.
semaphore_acquired = await semaphore.acquire()
await self._start_task(coroutine, group_name=group_name, ignore_lock=True,
end_callback=release_cb, cancel_callback=cancel_callback)
await map_semaphore.acquire()
try:
await self._start_task(star_function(func, next_arg, arg_stars=arg_stars), group_name=group_name,
ignore_lock=True, end_callback=release_cb, cancel_callback=cancel_callback)
except CancelledError:
# Either the task group or all tasks were cancelled, so this meta task is not supposed to spawn any
# more tasks and can return immediately. (This means we drop `arg_iter` without consuming it fully.)
log.debug("Cancelled group '%s' after %s tasks have been spawned", group_name, i)
coroutine.close()
if semaphore_acquired:
semaphore.release()
# This means that no more tasks are supposed to be created from this `arg_iter`;
# thus, we can forget about the rest of the arguments.
log.debug("Cancelled consumption of argument iterable in task group '%s'", group_name)
map_semaphore.release()
return
except Exception as e:
# This means an exception occurred during task **creation**, meaning no task has been created.
# It does not imply an error within the task itself.
log.exception("%s occurred while trying to create task: %s(%s%s)",
str(e.__class__.__name__), func.__name__, '*' * arg_stars, str(next_arg))
map_semaphore.release()
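Both versions of `_arg_consumer` implement the same throttle: one semaphore slot is acquired per argument before a task starts, and the wrapped end-callback releases it when the task ends. A self-contained sketch of that control flow (all names illustrative):

```python
import asyncio
from typing import Iterable, Set

async def consume(arg_iter: Iterable[float], num_concurrent: int) -> None:
    semaphore = asyncio.Semaphore(num_concurrent)
    tasks: Set[asyncio.Task] = set()

    async def run(arg: float) -> None:
        try:
            await asyncio.sleep(arg)  # stand-in for the real coroutine function
        finally:
            semaphore.release()  # mirrors the wrapped end-callback freeing the slot

    for arg in arg_iter:
        await semaphore.acquire()  # blocks once num_concurrent tasks are in flight
        tasks.add(asyncio.create_task(run(arg)))
    await asyncio.gather(*tasks)
```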

def _map(self, group_name: str, num_concurrent: int, func: CoroutineFunc, arg_iter: ArgsT, arg_stars: int,
async def _map(self, group_name: str, num_concurrent: int, func: CoroutineFunc, arg_iter: ArgsT, arg_stars: int,
end_callback: EndCB = None, cancel_callback: CancelCB = None) -> None:
"""
Creates tasks in the pool with arguments from the supplied iterable.
@@ -774,9 +787,6 @@ class TaskPool(BaseTaskPool):
because this method returns immediately, this does not mean that any task was started or that any number of
tasks will start soon, as this is solely determined by the :attr:`BaseTaskPool.pool_size` and `num_concurrent`.

If the entire task group is cancelled, the meta task is cancelled first, which means that `arg_iter` may be
abandoned before being fully consumed (if that is even possible).

Args:
group_name:
Name of the task group to add the new tasks to. It must be a name that doesn't exist yet.
@@ -804,12 +814,13 @@ class TaskPool(BaseTaskPool):
raise ValueError("`num_concurrent` must be a positive integer.")
if group_name in self._task_groups.keys():
raise exceptions.InvalidGroupName(f"Group named {group_name} already exists!")
self._task_groups[group_name] = TaskGroupRegister()
self._task_groups[group_name] = group_reg = TaskGroupRegister()
async with group_reg:
meta_tasks = self._group_meta_tasks_running.setdefault(group_name, set())
meta_tasks.add(create_task(self._arg_consumer(group_name, num_concurrent, func, arg_iter, arg_stars,
end_callback=end_callback, cancel_callback=cancel_callback)))

def map(self, func: CoroutineFunc, arg_iter: ArgsT, num_concurrent: int = 1, group_name: str = None,
async def map(self, func: CoroutineFunc, arg_iter: ArgsT, num_concurrent: int = 1, group_name: str = None,
end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
"""
A task-based equivalent of the `multiprocessing.pool.Pool.map` method.
@@ -829,9 +840,6 @@ class TaskPool(BaseTaskPool):
because this method returns immediately, this does not mean that any task was started or that any number of
tasks will start soon, as this is solely determined by the :attr:`BaseTaskPool.pool_size` and `num_concurrent`.

If the entire task group is cancelled, the meta task is cancelled first, which means that `arg_iter` may be
abandoned before being fully consumed (if that is even possible).

Args:
func:
The coroutine function to use for spawning the new tasks within the task pool.
@@ -841,8 +849,6 @@ class TaskPool(BaseTaskPool):
The number of new tasks spawned by this method to run concurrently. Defaults to 1.
group_name (optional):
Name of the task group to add the new tasks to. If provided, it must be a name that doesn't exist yet.
By default, a unique name is constructed in the form :code:`'map-{name}-group-{idx}'`
(with `name` being the name of the `func` and `idx` being an incrementing index).
end_callback (optional):
A callback to execute after a task has ended.
It is run with the task's ID as its only positional argument.
@@ -851,7 +857,7 @@ class TaskPool(BaseTaskPool):
It is run with the task's ID as its only positional argument.

Returns:
The name of the newly created group (see the `group_name` parameter).
The name of the task group that the newly spawned tasks will be added to.

Raises:
`PoolIsClosed`: The pool is closed.
@@ -862,41 +868,34 @@ class TaskPool(BaseTaskPool):
"""
if group_name is None:
group_name = self._generate_group_name('map', func)
self._map(group_name, num_concurrent, func, arg_iter, 0,
await self._map(group_name, num_concurrent, func, arg_iter, 0,
end_callback=end_callback, cancel_callback=cancel_callback)
return group_name
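A usage sketch of `map` against the synchronous variant shown first above (the async variant would be awaited); `process` is an illustrative coroutine function:

```python
import asyncio
from asyncio_taskpool import TaskPool

async def process(item: int) -> None:
    await asyncio.sleep(0.1)  # stand-in for real work on `item`

async def main() -> None:
    pool = TaskPool()
    # At most 3 `process` tasks run at any moment; the remaining items
    # wait until a running task ends and frees a semaphore slot.
    pool.map(process, range(10), num_concurrent=3)
    pool.lock()
    await pool.gather_and_close()

asyncio.run(main())
```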

def starmap(self, func: CoroutineFunc, args_iter: Iterable[ArgsT], num_concurrent: int = 1, group_name: str = None,
end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
async def starmap(self, func: CoroutineFunc, args_iter: Iterable[ArgsT], num_concurrent: int = 1,
group_name: str = None, end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
"""
Like :meth:`map` except that the elements of `args_iter` are expected to be iterables themselves to be unpacked
as positional arguments to the function.
Each coroutine then looks like `func(*args)`, `args` being an element from `args_iter`.

Returns:
The name of the newly created group in the form :code:`'starmap-{name}-group-{idx}'`
(with `name` being the name of the `func` and `idx` being an incrementing index).
"""
if group_name is None:
group_name = self._generate_group_name('starmap', func)
self._map(group_name, num_concurrent, func, args_iter, 1,
await self._map(group_name, num_concurrent, func, args_iter, 1,
end_callback=end_callback, cancel_callback=cancel_callback)
return group_name
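A brief usage sketch of `starmap`, again assuming the synchronous variant shown first (`add` is illustrative):

```python
import asyncio
from asyncio_taskpool import TaskPool

async def add(x: int, y: int) -> None:
    print(x + y)

async def main() -> None:
    pool = TaskPool()
    # Each element is unpacked positionally: add(1, 2), add(3, 4), add(5, 6).
    pool.starmap(add, [(1, 2), (3, 4), (5, 6)], num_concurrent=2)
    pool.lock()
    await pool.gather_and_close()

asyncio.run(main())
```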

def doublestarmap(self, func: CoroutineFunc, kwargs_iter: Iterable[KwArgsT], num_concurrent: int = 1,
group_name: str = None, end_callback: EndCB = None, cancel_callback: CancelCB = None) -> str:
async def doublestarmap(self, func: CoroutineFunc, kwargs_iter: Iterable[KwArgsT], num_concurrent: int = 1,
group_name: str = None, end_callback: EndCB = None,
cancel_callback: CancelCB = None) -> str:
"""
Like :meth:`map` except that the elements of `kwargs_iter` are expected to be mappings to be
unpacked as keyword-arguments to the function.
Each coroutine then looks like `func(**kwargs)`, `kwargs` being an element from `kwargs_iter`.

Returns:
The name of the newly created group in the form :code:`'doublestarmap-{name}-group-{idx}'`
(with `name` being the name of the `func` and `idx` being an incrementing index).
"""
if group_name is None:
group_name = self._generate_group_name('doublestarmap', func)
self._map(group_name, num_concurrent, func, kwargs_iter, 2,
await self._map(group_name, num_concurrent, func, kwargs_iter, 2,
end_callback=end_callback, cancel_callback=cancel_callback)
return group_name
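And a matching sketch of `doublestarmap`, under the same assumptions (`connect` is illustrative):

```python
import asyncio
from asyncio_taskpool import TaskPool

async def connect(host: str, port: int) -> None:
    print(f'{host}:{port}')

async def main() -> None:
    pool = TaskPool()
    # Each element is unpacked as keyword arguments:
    # connect(host='a', port=1), then connect(host='b', port=2).
    pool.doublestarmap(connect, [{'host': 'a', 'port': 1}, {'host': 'b', 'port': 2}])
    pool.lock()
    await pool.gather_and_close()

asyncio.run(main())
```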

@@ -952,7 +951,6 @@ class SimpleTaskPool(BaseTaskPool):
self._kwargs: KwArgsT = kwargs if kwargs is not None else {}
self._end_callback: EndCB = end_callback
self._cancel_callback: CancelCB = cancel_callback
self._start_calls: int = 0
super().__init__(pool_size=pool_size, name=name)

@property
@@ -960,52 +958,26 @@ class SimpleTaskPool(BaseTaskPool):
"""Name of the coroutine function used in the pool."""
return self._func.__name__

async def _start_num(self, num: int, group_name: str) -> None:
"""Starts `num` new tasks in group `group_name`."""
for i in range(num):
try:
coroutine = self._func(*self._args, **self._kwargs)
except Exception as e:
# This means there was probably something wrong with the function arguments.
log.exception("%s occurred in '%s' while trying to create coroutine: %s(*%s, **%s)",
str(e.__class__.__name__), str(self), self._func.__name__,
repr(self._args), repr(self._kwargs))
continue  # TODO: Consider returning instead of continuing
try:
await self._start_task(coroutine, group_name=group_name, end_callback=self._end_callback,
cancel_callback=self._cancel_callback)
except CancelledError:
# Either the task group or all tasks were cancelled, so this meta task is not supposed to spawn any
# more tasks and can return immediately.
log.debug("Cancelled group '%s' after %s out of %s tasks have been spawned", group_name, i, num)
coroutine.close()
return
async def _start_one(self) -> int:
"""Starts a single new task within the pool and returns its ID."""
return await self._start_task(self._func(*self._args, **self._kwargs),
end_callback=self._end_callback, cancel_callback=self._cancel_callback)

def start(self, num: int) -> str:
async def start(self, num: int) -> List[int]:
"""
Starts the specified number of new tasks in the pool as a new group.
Starts the specified number of new tasks in the pool and returns their IDs.

Because this method delegates the spawning of the tasks to a meta task, it **never blocks**. However, just
because this method returns immediately, this does not mean that any task was started or that any number of
tasks will start soon, as this is solely determined by the :attr:`BaseTaskPool.pool_size` and `num`.

If the entire task group is cancelled before `num` tasks have spawned, since the meta task is cancelled first,
the number of tasks spawned will end up being less than `num`.
This method may block if there is less room in the pool than the desired number of new tasks.

Args:
num: The number of new tasks to start.

Returns:
The name of the newly created task group in the form :code:`'start-group-{idx}'`
(with `idx` being an incrementing index).
List of IDs of the new tasks that have been started (not necessarily in the order they were started).
"""
self._check_start(function=self._func)
group_name = f'start-group-{self._start_calls}'
self._start_calls += 1
self._task_groups.setdefault(group_name, TaskGroupRegister())
meta_tasks = self._group_meta_tasks_running.setdefault(group_name, set())
meta_tasks.add(create_task(self._start_num(num, group_name)))
return group_name
ids = await gather(*(self._start_one() for _ in range(num)))
assert isinstance(ids, list)  # for PyCharm
return ids
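A usage sketch of `SimpleTaskPool`, assuming the synchronous `start` variant shown first and a `stop_all()` method alongside the `stop(num)` shown below; the queue-worker setup is illustrative:

```python
import asyncio
from asyncio_taskpool import SimpleTaskPool

async def worker(queue: asyncio.Queue) -> None:
    while True:
        item = await queue.get()
        print(item)
        queue.task_done()

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    for i in range(10):
        queue.put_nowait(i)
    pool = SimpleTaskPool(worker, args=(queue,))
    pool.start(3)    # synchronous variant: returns e.g. 'start-group-0' at once
    await queue.join()
    pool.stop_all()  # cancel the (endless) workers before closing the pool
    pool.lock()
    await pool.gather_and_close()

asyncio.run(main())
```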

def stop(self, num: int) -> List[int]:
"""

@@ -1,30 +0,0 @@
__author__ = "Daniil Fajnberg"
__copyright__ = "Copyright © 2022 Daniil Fajnberg"
__license__ = """GNU LGPLv3.0

This file is part of asyncio-taskpool.

asyncio-taskpool is free software: you can redistribute it and/or modify it under the terms of
version 3.0 of the GNU Lesser General Public License as published by the Free Software Foundation.

asyncio-taskpool is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with asyncio-taskpool.
If not, see <https://www.gnu.org/licenses/>."""

__doc__ = """
Main entry point for all unit tests.
"""


import sys
import unittest


if __name__ == '__main__':
test_suite = unittest.defaultTestLoader.discover('.')
test_runner = unittest.TextTestRunner(resultclass=unittest.TextTestResult)
result = test_runner.run(test_suite)
sys.exit(not result.wasSuccessful())
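The last line turns the test outcome into a conventional process exit status: `wasSuccessful()` is `True` on a fully green run, so `not result.wasSuccessful()` is `False`, and `sys.exit(False)` exits with code 0, while any failure yields exit code 1. An equivalent, more explicit spelling:

```python
sys.exit(0 if result.wasSuccessful() else 1)
```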
@@ -38,7 +38,7 @@ class CLITestCase(IsolatedAsyncioTestCase):
mock_client = MagicMock(start=mock_client_start)
mock_client_cls = MagicMock(return_value=mock_client)
mock_client_kwargs = {'foo': 123, 'bar': 456, 'baz': 789}
mock_parse_cli.return_value = {module.CLIENT_CLASS: mock_client_cls, **mock_client_kwargs}
mock_parse_cli.return_value = {module.CLIENT_CLASS: mock_client_cls} | mock_client_kwargs
self.assertIsNone(await module.main())
mock_parse_cli.assert_called_once_with()
mock_client_cls.assert_called_once_with(**mock_client_kwargs)
@@ -41,9 +41,9 @@ class ControlParserTestCase(TestCase):
self.help_formatter_factory_patcher = patch.object(parser.ControlParser, 'help_formatter_factory')
self.mock_help_formatter_factory = self.help_formatter_factory_patcher.start()
self.mock_help_formatter_factory.return_value = RawTextHelpFormatter
self.stream, self.terminal_width = MagicMock(), 420
self.stream_writer, self.terminal_width = MagicMock(), 420
self.kwargs = {
'stream': self.stream,
'stream_writer': self.stream_writer,
'terminal_width': self.terminal_width,
'formatter_class': FOO
}
@@ -72,9 +72,10 @@ class ControlParserTestCase(TestCase):

def test_init(self):
self.assertIsInstance(self.parser, ArgumentParser)
self.assertEqual(self.stream, self.parser._stream)
self.assertEqual(self.stream_writer, self.parser._stream_writer)
self.assertEqual(self.terminal_width, self.parser._terminal_width)
self.mock_help_formatter_factory.assert_called_once_with(self.terminal_width, FOO)
self.assertFalse(getattr(self.parser, 'exit_on_error'))
self.assertEqual(RawTextHelpFormatter, getattr(self.parser, 'formatter_class'))
self.assertSetEqual(set(), self.parser._flags)
self.assertIsNone(self.parser._commands)
@@ -88,7 +89,7 @@ class ControlParserTestCase(TestCase):
mock_get_first_doc_line.return_value = mock_help = 'help 123'
kwargs = {FOO: 1, BAR: 2, parser.DESCRIPTION: FOO + BAR}
expected_name = 'foo-bar'
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: mock_help, **kwargs}
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: mock_help} | kwargs
to_omit = ['abc', 'xyz']
output = self.parser.add_function_command(foo_bar, omit_params=to_omit, **kwargs)
self.assertEqual(mock_subparser, output)
@@ -106,7 +107,7 @@ class ControlParserTestCase(TestCase):
mock_get_first_doc_line.return_value = mock_help = 'help 123'
kwargs = {FOO: 1, BAR: 2, parser.DESCRIPTION: FOO + BAR}
expected_name = 'get-prop'
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: mock_help, **kwargs}
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: mock_help} | kwargs
output = self.parser.add_property_command(prop, **kwargs)
self.assertEqual(mock_subparser, output)
mock_get_first_doc_line.assert_called_once_with(get_prop)
@@ -118,7 +119,7 @@ class ControlParserTestCase(TestCase):

prop = property(get_prop, set_prop)
expected_help = f"Get/set the `.{expected_name}` property"
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: expected_help, **kwargs}
expected_kwargs = {parser.NAME: expected_name, parser.PROG: expected_name, parser.HELP: expected_help} | kwargs
output = self.parser.add_property_command(prop, **kwargs)
self.assertEqual(mock_subparser, output)
mock_get_first_doc_line.assert_has_calls([call(get_prop), call(set_prop)])
@@ -151,7 +152,8 @@ class ControlParserTestCase(TestCase):
mock_subparser = MagicMock(set_defaults=mock_set_defaults)
mock_add_function_command.return_value = mock_add_property_command.return_value = mock_subparser
x = 'x'
common_kwargs = {'stream': self.parser._stream, parser.CLIENT_INFO.TERMINAL_WIDTH: self.parser._terminal_width}
common_kwargs = {parser.STREAM_WRITER: self.parser._stream_writer,
parser.CLIENT_INFO.TERMINAL_WIDTH: self.parser._terminal_width}
expected_output = {'method': mock_subparser, 'prop': mock_subparser}
output = self.parser.add_class_commands(FooBar, public_only=True, omit_members=['to_omit'], member_arg_name=x)
self.assertDictEqual(expected_output, output)
@@ -168,12 +170,12 @@ class ControlParserTestCase(TestCase):
mock_base_add_subparsers.assert_called_once_with(*args, **kwargs)

def test__print_message(self):
self.stream.write = MagicMock()
self.stream_writer.write = MagicMock()
self.assertIsNone(self.parser._print_message(''))
self.stream.write.assert_not_called()
self.stream_writer.write.assert_not_called()
msg = 'foo bar baz'
self.assertIsNone(self.parser._print_message(msg))
self.stream.write.assert_called_once_with(msg)
self.stream_writer.write.assert_called_once_with(msg.encode())

@patch.object(parser.ControlParser, '_print_message')
def test_exit(self, mock__print_message: MagicMock):
@@ -21,12 +21,11 @@ Unittests for the `asyncio_taskpool.session` module.

import json
from argparse import ArgumentError, Namespace
from io import StringIO
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, MagicMock, patch, call

from asyncio_taskpool.control import session
from asyncio_taskpool.internals.constants import CLIENT_INFO, CMD, SESSION_MSG_BYTES
from asyncio_taskpool.internals.constants import CLIENT_INFO, CMD, SESSION_MSG_BYTES, STREAM_WRITER
from asyncio_taskpool.exceptions import HelpRequested
from asyncio_taskpool.pool import SimpleTaskPool

@@ -62,15 +61,14 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
self.assertEqual(self.mock_reader, self.session._reader)
self.assertEqual(self.mock_writer, self.session._writer)
self.assertIsNone(self.session._parser)
self.assertIsInstance(self.session._response_buffer, StringIO)

@patch.object(session, 'return_or_exception')
async def test__exec_method_and_respond(self, mock_return_or_exception: AsyncMock):
def method(self, arg1, arg2, *var_args, **rest): pass
test_arg1, test_arg2, test_var_args, test_rest = 123, 'xyz', [0.1, 0.2, 0.3], {'aaa': 1, 'bbb': 11}
kwargs = {'arg1': test_arg1, 'arg2': test_arg2, 'var_args': test_var_args}
kwargs = {'arg1': test_arg1, 'arg2': test_arg2, 'var_args': test_var_args} | test_rest
mock_return_or_exception.return_value = None
self.assertIsNone(await self.session._exec_method_and_respond(method, **kwargs, **test_rest))
self.assertIsNone(await self.session._exec_method_and_respond(method, **kwargs))
mock_return_or_exception.assert_awaited_once_with(
method, self.mock_pool, test_arg1, test_arg2, *test_var_args, **test_rest
)
@@ -106,7 +104,7 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
self.mock_reader.read = mock_read
self.mock_writer.drain = AsyncMock()
expected_parser_kwargs = {
'stream': self.session._response_buffer,
STREAM_WRITER: self.mock_writer,
CLIENT_INFO.TERMINAL_WIDTH: width,
'prog': '',
'usage': f'[-h] [{CMD}] ...'
@@ -134,9 +132,10 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
kwargs = {FOO: BAR, 'hello': 'python'}
mock_parse_args = MagicMock(return_value=Namespace(**{CMD: method}, **kwargs))
self.session._parser = MagicMock(parse_args=mock_parse_args)
self.mock_writer.write = MagicMock()
self.assertIsNone(await self.session._parse_command(msg))
mock_parse_args.assert_called_once_with(msg.split(' '))
self.assertEqual('', self.session._response_buffer.getvalue())
self.mock_writer.write.assert_not_called()
mock__exec_method_and_respond.assert_awaited_once_with(method, **kwargs)
mock__exec_property_and_respond.assert_not_called()

@@ -146,7 +145,7 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
mock_parse_args.return_value = Namespace(**{CMD: prop}, **kwargs)
self.assertIsNone(await self.session._parse_command(msg))
mock_parse_args.assert_called_once_with(msg.split(' '))
self.assertEqual('', self.session._response_buffer.getvalue())
self.mock_writer.write.assert_not_called()
mock__exec_method_and_respond.assert_not_called()
mock__exec_property_and_respond.assert_awaited_once_with(prop, **kwargs)

@@ -162,28 +161,26 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
mock_parse_args.assert_called_once_with(msg.split(' '))
mock__exec_method_and_respond.assert_not_called()
mock__exec_property_and_respond.assert_not_called()
self.assertEqual(str(exc), self.session._response_buffer.getvalue())
self.mock_writer.write.assert_called_once_with(str(exc).encode())

mock__exec_property_and_respond.reset_mock()
mock_parse_args.reset_mock()
self.session._response_buffer.seek(0)
self.session._response_buffer.truncate()
self.mock_writer.write.reset_mock()

mock_parse_args.side_effect = exc = ArgumentError(MagicMock(), "oops")
self.assertIsNone(await self.session._parse_command(msg))
mock_parse_args.assert_called_once_with(msg.split(' '))
self.assertEqual(str(exc), self.session._response_buffer.getvalue())
self.mock_writer.write.assert_called_once_with(str(exc).encode())
mock__exec_method_and_respond.assert_not_awaited()
mock__exec_property_and_respond.assert_not_awaited()

self.mock_writer.write.reset_mock()
mock_parse_args.reset_mock()
self.session._response_buffer.seek(0)
self.session._response_buffer.truncate()

mock_parse_args.side_effect = HelpRequested()
self.assertIsNone(await self.session._parse_command(msg))
mock_parse_args.assert_called_once_with(msg.split(' '))
self.assertEqual('', self.session._response_buffer.getvalue())
self.mock_writer.write.assert_not_called()
mock__exec_method_and_respond.assert_not_awaited()
mock__exec_property_and_respond.assert_not_awaited()

@@ -194,23 +191,17 @@ class ControlServerTestCase(IsolatedAsyncioTestCase):
self.mock_writer.drain = AsyncMock(side_effect=make_reader_return_empty)
msg = "fascinating"
self.mock_reader.read = AsyncMock(return_value=f' {msg} '.encode())
response = FOO + BAR + FOO
self.session._response_buffer.write(response)
self.assertIsNone(await self.session.listen())
self.mock_reader.read.assert_has_awaits([call(SESSION_MSG_BYTES), call(SESSION_MSG_BYTES)])
mock__parse_command.assert_awaited_once_with(msg)
self.assertEqual('', self.session._response_buffer.getvalue())
self.mock_writer.write.assert_called_once_with(response.encode())
self.mock_writer.drain.assert_awaited_once_with()

self.mock_reader.read.reset_mock()
mock__parse_command.reset_mock()
self.mock_writer.write.reset_mock()
self.mock_writer.drain.reset_mock()

self.mock_server.is_serving = MagicMock(return_value=False)
self.assertIsNone(await self.session.listen())
self.mock_reader.read.assert_not_awaited()
mock__parse_command.assert_not_awaited()
self.mock_writer.write.assert_not_called()
self.mock_writer.drain.assert_not_awaited()
@@ -18,11 +18,10 @@ __doc__ = """
Unittests for the `asyncio_taskpool.helpers` module.
"""

import importlib
from unittest import IsolatedAsyncioTestCase, TestCase

from unittest import IsolatedAsyncioTestCase
from unittest.mock import MagicMock, AsyncMock, NonCallableMagicMock, call, patch

from asyncio_taskpool.internals import constants
from asyncio_taskpool.internals import helpers


@@ -123,45 +122,3 @@ class HelpersTestCase(IsolatedAsyncioTestCase):
with self.assertRaises(AttributeError):
helpers.resolve_dotted_path('foo.bar.baz')
mock_import_module.assert_has_calls([call('foo'), call('foo.bar')])


class ClassMethodWorkaroundTestCase(TestCase):
def test_init(self):
def func(): return 'foo'
def getter(): return 'bar'
prop = property(getter)
instance = helpers.ClassMethodWorkaround(func)
self.assertIs(func, instance._getter)
instance = helpers.ClassMethodWorkaround(prop)
self.assertIs(getter, instance._getter)

@patch.object(helpers.ClassMethodWorkaround, '__init__', return_value=None)
def test_get(self, _mock_init: MagicMock):
def func(x: MagicMock): return x.__name__
instance = helpers.ClassMethodWorkaround(MagicMock())
instance._getter = func
obj, cls = None, MagicMock
expected_output = 'MagicMock'
output = instance.__get__(obj, cls)
self.assertEqual(expected_output, output)

obj = MagicMock(__name__='bar')
expected_output = 'bar'
output = instance.__get__(obj, cls)
self.assertEqual(expected_output, output)

cls = None
output = instance.__get__(obj, cls)
self.assertEqual(expected_output, output)

def test_correct_class(self):
is_older_python = constants.PYTHON_BEFORE_39
try:
constants.PYTHON_BEFORE_39 = True
importlib.reload(helpers)
self.assertIs(helpers.ClassMethodWorkaround, helpers.classmethod)
constants.PYTHON_BEFORE_39 = False
importlib.reload(helpers)
self.assertIs(classmethod, helpers.classmethod)
finally:
constants.PYTHON_BEFORE_39 = is_older_python
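These (removed) tests pin down the descriptor's contract: the constructor stores either the function itself or a property's getter, and `__get__` binds to the owner class when no instance is passed. A sketch of a class satisfying exactly these assertions — the library's real implementation may differ in detail:

```python
class ClassMethodWorkaround:
    """Stand-in for chaining `classmethod` with `property`, needed before Python 3.9."""

    def __init__(self, method_or_property):
        if isinstance(method_or_property, property):
            self._getter = method_or_property.fget  # unwrap the property's getter
        else:
            self._getter = method_or_property

    def __get__(self, obj, cls):
        if obj is None:
            return self._getter(cls)  # accessed on the class itself
        return self._getter(obj)      # accessed on an instance
```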
@@ -20,11 +20,13 @@ Unittests for the `asyncio_taskpool.pool` module.

from asyncio.exceptions import CancelledError
from asyncio.locks import Semaphore
from datetime import datetime
from unittest import IsolatedAsyncioTestCase
from unittest.mock import PropertyMock, MagicMock, AsyncMock, patch, call
from typing import Type

from asyncio_taskpool import pool, exceptions
from asyncio_taskpool.internals.constants import DATETIME_FORMAT


EMPTY_LIST, EMPTY_DICT, EMPTY_SET = [], {}, set()
@@ -93,9 +95,6 @@ class BaseTaskPoolTestCase(CommonTestCase):
self.assertIsInstance(self.task_pool._enough_room, Semaphore)
self.assertDictEqual(EMPTY_DICT, self.task_pool._task_groups)

self.assertDictEqual(EMPTY_DICT, self.task_pool._group_meta_tasks_running)
self.assertSetEqual(EMPTY_SET, self.task_pool._meta_tasks_cancelled)

self.assertEqual(self.mock_idx, self.task_pool._idx)

self.mock__add_pool.assert_called_once_with(self.task_pool)
@@ -311,32 +310,100 @@ class BaseTaskPoolTestCase(CommonTestCase):
self.task_pool._get_running_task(task_id)
mock__task_name.assert_not_called()

@patch('warnings.warn')
def test__get_cancel_kw(self, mock_warn: MagicMock):
msg = None
self.assertDictEqual(EMPTY_DICT, pool.BaseTaskPool._get_cancel_kw(msg))
mock_warn.assert_not_called()

msg = 'something'
with patch.object(pool, 'PYTHON_BEFORE_39', new=True):
self.assertDictEqual(EMPTY_DICT, pool.BaseTaskPool._get_cancel_kw(msg))
mock_warn.assert_called_once()
mock_warn.reset_mock()

with patch.object(pool, 'PYTHON_BEFORE_39', new=False):
self.assertDictEqual({'msg': msg}, pool.BaseTaskPool._get_cancel_kw(msg))
mock_warn.assert_not_called()
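This test fixes the expected contract of `_get_cancel_kw`: `Task.cancel()` only accepts a `msg` argument from Python 3.9 on, so on older interpreters the helper must drop the message and warn. A sketch of a helper satisfying exactly these assertions (module-level function instead of the static method; the real implementation may differ):

```python
import warnings

PYTHON_BEFORE_39 = False  # module-level flag, patched in the test above

def get_cancel_kw(msg):
    if msg is None:
        return {}
    if PYTHON_BEFORE_39:
        warnings.warn("Parameter `msg` of `Task.cancel` is only available from Python 3.9 on.")
        return {}
    return {'msg': msg}
```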

@patch.object(pool.BaseTaskPool, '_get_cancel_kw')
@patch.object(pool.BaseTaskPool, '_get_running_task')
def test_cancel(self, mock__get_running_task: MagicMock, mock__get_cancel_kw: MagicMock):
mock__get_cancel_kw.return_value = fake_cancel_kw = {'a': 10, 'b': 20}
def test_cancel(self, mock__get_running_task: MagicMock):
task_id1, task_id2, task_id3 = 1, 4, 9
mock__get_running_task.return_value.cancel = mock_cancel = MagicMock()
self.assertIsNone(self.task_pool.cancel(task_id1, task_id2, task_id3, msg=FOO))
mock__get_running_task.assert_has_calls([call(task_id1), call(task_id2), call(task_id3)])
mock__get_cancel_kw.assert_called_once_with(FOO)
mock_cancel.assert_has_calls(3 * [call(**fake_cancel_kw)])
mock_cancel.assert_has_calls([call(msg=FOO), call(msg=FOO), call(msg=FOO)])

def test__cancel_and_remove_all_from_group(self):
task_id = 555
mock_cancel = MagicMock()
self.task_pool._tasks_running[task_id] = MagicMock(cancel=mock_cancel)

class MockRegister(set, MagicMock):
pass
self.assertIsNone(self.task_pool._cancel_and_remove_all_from_group(' ', MockRegister({task_id, 'x'}), msg=FOO))
mock_cancel.assert_called_once_with(msg=FOO)

@patch.object(pool.BaseTaskPool, '_cancel_and_remove_all_from_group')
async def test_cancel_group(self, mock__cancel_and_remove_all_from_group: MagicMock):
mock_grp_aenter, mock_grp_aexit = AsyncMock(), AsyncMock()
mock_group_reg = MagicMock(__aenter__=mock_grp_aenter, __aexit__=mock_grp_aexit)
self.task_pool._task_groups[FOO] = mock_group_reg
with self.assertRaises(exceptions.InvalidGroupName):
await self.task_pool.cancel_group(BAR)
mock__cancel_and_remove_all_from_group.assert_not_called()
mock_grp_aenter.assert_not_called()
mock_grp_aexit.assert_not_called()
self.assertIsNone(await self.task_pool.cancel_group(FOO, msg=BAR))
mock__cancel_and_remove_all_from_group.assert_called_once_with(FOO, mock_group_reg, msg=BAR)
mock_grp_aenter.assert_awaited_once_with()
mock_grp_aexit.assert_awaited_once()

@patch.object(pool.BaseTaskPool, '_cancel_and_remove_all_from_group')
async def test_cancel_all(self, mock__cancel_and_remove_all_from_group: MagicMock):
mock_grp_aenter, mock_grp_aexit = AsyncMock(), AsyncMock()
mock_group_reg = MagicMock(__aenter__=mock_grp_aenter, __aexit__=mock_grp_aexit)
self.task_pool._task_groups[BAR] = mock_group_reg
self.assertIsNone(await self.task_pool.cancel_all(FOO))
mock__cancel_and_remove_all_from_group.assert_called_once_with(BAR, mock_group_reg, msg=FOO)
mock_grp_aenter.assert_awaited_once_with()
mock_grp_aexit.assert_awaited_once()

async def test_flush(self):
mock_ended_func, mock_cancelled_func = AsyncMock(), AsyncMock(side_effect=Exception)
self.task_pool._tasks_ended = {123: mock_ended_func()}
self.task_pool._tasks_cancelled = {456: mock_cancelled_func()}
self.assertIsNone(await self.task_pool.flush(return_exceptions=True))
mock_ended_func.assert_awaited_once_with()
mock_cancelled_func.assert_awaited_once_with()
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_ended)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_cancelled)

async def test_gather_and_close(self):
mock_running_func = AsyncMock()
mock_ended_func, mock_cancelled_func = AsyncMock(), AsyncMock(side_effect=Exception)
self.task_pool._tasks_ended = ended = {123: mock_ended_func()}
self.task_pool._tasks_cancelled = cancelled = {456: mock_cancelled_func()}
self.task_pool._tasks_running = running = {789: mock_running_func()}

with self.assertRaises(exceptions.PoolStillUnlocked):
await self.task_pool.gather_and_close()
self.assertDictEqual(ended, self.task_pool._tasks_ended)
self.assertDictEqual(cancelled, self.task_pool._tasks_cancelled)
self.assertDictEqual(running, self.task_pool._tasks_running)
self.assertFalse(self.task_pool._closed)

self.task_pool._locked = True
self.assertIsNone(await self.task_pool.gather_and_close(return_exceptions=True))
mock_ended_func.assert_awaited_once_with()
mock_cancelled_func.assert_awaited_once_with()
mock_running_func.assert_awaited_once_with()
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_ended)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_cancelled)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_running)
self.assertTrue(self.task_pool._closed)


class TaskPoolTestCase(CommonTestCase):
TEST_CLASS = pool.TaskPool
task_pool: pool.TaskPool

def setUp(self) -> None:
self.base_class_init_patcher = patch.object(pool.BaseTaskPool, '__init__')
self.base_class_init = self.base_class_init_patcher.start()
super().setUp()

def tearDown(self) -> None:
self.base_class_init_patcher.stop()
super().tearDown()

def test_init(self):
self.assertDictEqual(EMPTY_DICT, self.task_pool._group_meta_tasks_running)
self.base_class_init.assert_called_once_with(pool_size=self.TEST_POOL_SIZE, name=self.TEST_POOL_NAME)

def test__cancel_group_meta_tasks(self):
mock_task1, mock_task2 = MagicMock(), MagicMock()
@@ -353,48 +420,26 @@ class BaseTaskPoolTestCase(CommonTestCase):
mock_task1.cancel.assert_called_once_with()
mock_task2.cancel.assert_called_once_with()

@patch.object(pool.BaseTaskPool, '_cancel_group_meta_tasks')
def test__cancel_and_remove_all_from_group(self, mock__cancel_group_meta_tasks: MagicMock):
kw = {BAR: 10, BAZ: 20}
task_id = 555
mock_cancel = MagicMock()

def add_mock_task_to_running(_):
self.task_pool._tasks_running[task_id] = MagicMock(cancel=mock_cancel)
# We add the fake task to the `_tasks_running` dictionary as a side effect of calling the mocked method,
# to verify that it is called first, before the cancellation loop starts.
mock__cancel_group_meta_tasks.side_effect = add_mock_task_to_running

class MockRegister(set, MagicMock):
pass
self.assertIsNone(self.task_pool._cancel_and_remove_all_from_group(' ', MockRegister({task_id, 'x'}), **kw))
mock_cancel.assert_called_once_with(**kw)

@patch.object(pool.BaseTaskPool, '_get_cancel_kw')
@patch.object(pool.BaseTaskPool, '_cancel_and_remove_all_from_group')
def test_cancel_group(self, mock__cancel_and_remove_all_from_group: MagicMock, mock__get_cancel_kw: MagicMock):
mock__get_cancel_kw.return_value = fake_cancel_kw = {'a': 10, 'b': 20}
self.task_pool._task_groups[FOO] = mock_group_reg = MagicMock()
with self.assertRaises(exceptions.InvalidGroupName):
self.task_pool.cancel_group(BAR)
mock__cancel_and_remove_all_from_group.assert_not_called()
self.assertIsNone(self.task_pool.cancel_group(FOO, msg=BAR))
self.assertDictEqual(EMPTY_DICT, self.task_pool._task_groups)
mock__get_cancel_kw.assert_called_once_with(BAR)
mock__cancel_and_remove_all_from_group.assert_called_once_with(FOO, mock_group_reg, **fake_cancel_kw)
@patch.object(pool.TaskPool, '_cancel_group_meta_tasks')
def test__cancel_and_remove_all_from_group(self, mock__cancel_group_meta_tasks: MagicMock,
mock_base__cancel_and_remove_all_from_group: MagicMock):
group_name, group_reg, msg = 'xyz', MagicMock(), FOO
self.assertIsNone(self.task_pool._cancel_and_remove_all_from_group(group_name, group_reg, msg=msg))
mock__cancel_group_meta_tasks.assert_called_once_with(group_name)
mock_base__cancel_and_remove_all_from_group.assert_called_once_with(group_name, group_reg, msg=msg)

@patch.object(pool.BaseTaskPool, '_get_cancel_kw')
@patch.object(pool.BaseTaskPool, '_cancel_and_remove_all_from_group')
def test_cancel_all(self, mock__cancel_and_remove_all_from_group: MagicMock, mock__get_cancel_kw: MagicMock):
mock__get_cancel_kw.return_value = fake_cancel_kw = {'a': 10, 'b': 20}
mock_group_reg = MagicMock()
self.task_pool._task_groups = {FOO: mock_group_reg, BAR: mock_group_reg}
self.assertIsNone(self.task_pool.cancel_all(BAZ))
mock__get_cancel_kw.assert_called_once_with(BAZ)
mock__cancel_and_remove_all_from_group.assert_has_calls([
call(BAR, mock_group_reg, **fake_cancel_kw),
call(FOO, mock_group_reg, **fake_cancel_kw)
])
@patch.object(pool.BaseTaskPool, 'cancel_group')
async def test_cancel_group(self, mock_base_cancel_group: AsyncMock):
group_name, msg = 'abc', 'xyz'
await self.task_pool.cancel_group(group_name, msg=msg)
mock_base_cancel_group.assert_awaited_once_with(group_name=group_name, msg=msg)

@patch.object(pool.BaseTaskPool, 'cancel_all')
async def test_cancel_all(self, mock_base_cancel_all: AsyncMock):
msg = 'xyz'
await self.task_pool.cancel_all(msg=msg)
mock_base_cancel_all.assert_awaited_once_with(msg=msg)

def test__pop_ended_meta_tasks(self):
mock_task, mock_done_task1 = MagicMock(done=lambda: False), MagicMock(done=lambda: True)
@@ -406,153 +451,106 @@ class BaseTaskPoolTestCase(CommonTestCase):
self.assertSetEqual(expected_output, output)
self.assertDictEqual({FOO: {mock_task}}, self.task_pool._group_meta_tasks_running)

@patch.object(pool.BaseTaskPool, '_pop_ended_meta_tasks')
async def test_flush(self, mock__pop_ended_meta_tasks: MagicMock):
# Meta tasks:
@patch.object(pool.TaskPool, '_pop_ended_meta_tasks')
@patch.object(pool.BaseTaskPool, 'flush')
async def test_flush(self, mock_base_flush: AsyncMock, mock__pop_ended_meta_tasks: MagicMock):
mock_ended_meta_task = AsyncMock()
mock__pop_ended_meta_tasks.return_value = {mock_ended_meta_task()}
mock_cancelled_meta_task = AsyncMock(side_effect=CancelledError)
self.task_pool._meta_tasks_cancelled = {mock_cancelled_meta_task()}
# Actual tasks:
mock_ended_func, mock_cancelled_func = AsyncMock(), AsyncMock(side_effect=Exception)
self.task_pool._tasks_ended = {123: mock_ended_func()}
self.task_pool._tasks_cancelled = {456: mock_cancelled_func()}

self.assertIsNone(await self.task_pool.flush(return_exceptions=True))

# Meta tasks:
self.assertIsNone(await self.task_pool.flush(return_exceptions=False))
mock_base_flush.assert_awaited_once_with(return_exceptions=False)
mock__pop_ended_meta_tasks.assert_called_once_with()
mock_ended_meta_task.assert_awaited_once_with()
mock_cancelled_meta_task.assert_awaited_once_with()
self.assertSetEqual(EMPTY_SET, self.task_pool._meta_tasks_cancelled)
# Actual tasks:
mock_ended_func.assert_awaited_once_with()
mock_cancelled_func.assert_awaited_once_with()
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_ended)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_cancelled)

@patch.object(pool.BaseTaskPool, 'lock')
async def test_gather_and_close(self, mock_lock: MagicMock):
# Meta tasks:
@patch.object(pool.BaseTaskPool, 'gather_and_close')
async def test_gather_and_close(self, mock_base_gather_and_close: AsyncMock):
mock_meta_task1, mock_meta_task2 = AsyncMock(), AsyncMock()
self.task_pool._group_meta_tasks_running = {FOO: {mock_meta_task1()}, BAR: {mock_meta_task2()}}
mock_cancelled_meta_task = AsyncMock(side_effect=CancelledError)
self.task_pool._meta_tasks_cancelled = {mock_cancelled_meta_task()}
# Actual tasks:
mock_running_func = AsyncMock()
mock_ended_func, mock_cancelled_func = AsyncMock(), AsyncMock(side_effect=Exception)
self.task_pool._tasks_ended = {123: mock_ended_func()}
self.task_pool._tasks_cancelled = {456: mock_cancelled_func()}
self.task_pool._tasks_running = {789: mock_running_func()}

self.assertIsNone(await self.task_pool.gather_and_close(return_exceptions=True))

mock_lock.assert_called_once_with()
# Meta tasks:
mock_base_gather_and_close.assert_awaited_once_with(return_exceptions=True)
mock_meta_task1.assert_awaited_once_with()
mock_meta_task2.assert_awaited_once_with()
mock_cancelled_meta_task.assert_awaited_once_with()
self.assertDictEqual(EMPTY_DICT, self.task_pool._group_meta_tasks_running)
self.assertSetEqual(EMPTY_SET, self.task_pool._meta_tasks_cancelled)
# Actual tasks:
mock_ended_func.assert_awaited_once_with()
mock_cancelled_func.assert_awaited_once_with()
mock_running_func.assert_awaited_once_with()
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_ended)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_cancelled)
self.assertDictEqual(EMPTY_DICT, self.task_pool._tasks_running)
self.assertTrue(self.task_pool._closed)


class TaskPoolTestCase(CommonTestCase):
TEST_CLASS = pool.TaskPool
task_pool: pool.TaskPool

def test__generate_group_name(self):
@patch.object(pool, 'datetime')
def test__generate_group_name(self, mock_datetime: MagicMock):
prefix, func = 'x y z', AsyncMock(__name__=BAR)
base_name = f'{prefix}-{BAR}-group'
self.task_pool._task_groups = {
f'{base_name}-0': MagicMock(),
f'{base_name}-1': MagicMock(),
f'{base_name}-100': MagicMock(),
}
expected_output = f'{base_name}-2'
output = self.task_pool._generate_group_name(prefix, func)
dt = datetime(1776, 7, 4, 0, 0, 1)
mock_datetime.now = MagicMock(return_value=dt)
expected_output = f'{prefix}_{BAR}_{dt.strftime(DATETIME_FORMAT)}'
output = pool.TaskPool._generate_group_name(prefix, func)
self.assertEqual(expected_output, output)
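The rewritten test implies that `_generate_group_name` switched from scanning existing names for the next free index to a timestamp-based scheme. A sketch of the timestamped variant the test expects; the value of `DATETIME_FORMAT` is an assumption (the real constant lives in `internals.constants`), and the function name here is illustrative:

```python
from datetime import datetime

DATETIME_FORMAT = '%Y-%m-%d_%H-%M-%S'  # assumed format, for illustration only

def generate_group_name(prefix: str, coroutine_function) -> str:
    # e.g. 'map_fetch_2022-02-22_10-20-30'
    return f'{prefix}_{coroutine_function.__name__}_{datetime.now().strftime(DATETIME_FORMAT)}'
```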

@patch.object(pool.TaskPool, '_start_task')
async def test__apply_spawner(self, mock__start_task: AsyncMock):
grp_name = FOO + BAR
mock_awaitable1, mock_awaitable2 = object(), object()
mock_func = MagicMock(side_effect=[mock_awaitable1, Exception(), mock_awaitable2], __name__='func')
args, kw, num = (FOO, BAR), {'a': 1, 'b': 2}, 3
async def test__apply_num(self, mock__start_task: AsyncMock):
group_name = FOO + BAR
mock_awaitable = object()
mock_func = MagicMock(return_value=mock_awaitable)
args, kwargs, num = (FOO, BAR), {'a': 1, 'b': 2}, 3
end_cb, cancel_cb = MagicMock(), MagicMock()
self.assertIsNone(await self.task_pool._apply_spawner(grp_name, mock_func, args, kw, num, end_cb, cancel_cb))
mock_func.assert_has_calls(num * [call(*args, **kw)])
mock__start_task.assert_has_awaits([
call(mock_awaitable1, group_name=grp_name, end_callback=end_cb, cancel_callback=cancel_cb),
call(mock_awaitable2, group_name=grp_name, end_callback=end_cb, cancel_callback=cancel_cb),
self.assertIsNone(await self.task_pool._apply_num(group_name, mock_func, args, kwargs, num, end_cb, cancel_cb))
mock_func.assert_has_calls(3 * [call(*args, **kwargs)])
mock__start_task.assert_has_awaits(3 * [
call(mock_awaitable, group_name=group_name, end_callback=end_cb, cancel_callback=cancel_cb)
])

mock_func.reset_mock(side_effect=True)
mock_func.reset_mock()
mock__start_task.reset_mock()

# Simulate cancellation while the second task is being started.
mock__start_task.side_effect = [None, CancelledError, None]
mock_coroutine_to_close = MagicMock()
mock_func.side_effect = [mock_awaitable1, mock_coroutine_to_close, 'never called']
self.assertIsNone(await self.task_pool._apply_spawner(grp_name, mock_func, args, None, num, end_cb, cancel_cb))
mock_func.assert_has_calls(2 * [call(*args)])
mock__start_task.assert_has_awaits([
call(mock_awaitable1, group_name=grp_name, end_callback=end_cb, cancel_callback=cancel_cb),
call(mock_coroutine_to_close, group_name=grp_name, end_callback=end_cb, cancel_callback=cancel_cb),
self.assertIsNone(await self.task_pool._apply_num(group_name, mock_func, args, None, num, end_cb, cancel_cb))
mock_func.assert_has_calls(num * [call(*args)])
mock__start_task.assert_has_awaits(num * [
call(mock_awaitable, group_name=group_name, end_callback=end_cb, cancel_callback=cancel_cb)
])
mock_coroutine_to_close.close.assert_called_once_with()

@patch.object(pool, 'create_task')
@patch.object(pool.TaskPool, '_apply_spawner', new_callable=MagicMock())
@patch.object(pool.TaskPool, '_apply_num', new_callable=MagicMock())
@patch.object(pool, 'TaskGroupRegister')
@patch.object(pool.TaskPool, '_generate_group_name')
@patch.object(pool.BaseTaskPool, '_check_start')
def test_apply(self, mock__check_start: MagicMock, mock__generate_group_name: MagicMock,
mock_reg_cls: MagicMock, mock__apply_spawner: MagicMock, mock_create_task: MagicMock):
async def test_apply(self, mock__check_start: MagicMock, mock__generate_group_name: MagicMock,
mock_reg_cls: MagicMock, mock__apply_num: MagicMock, mock_create_task: MagicMock):
mock__generate_group_name.return_value = generated_name = 'name 123'
mock_group_reg = set_up_mock_group_register(mock_reg_cls)
mock__apply_spawner.return_value = mock_apply_coroutine = object()
mock_create_task.return_value = fake_task = object()
mock__apply_num.return_value = mock_apply_coroutine = object()
mock_task_future = AsyncMock()
mock_create_task.return_value = mock_task_future()
mock_func, num, group_name = MagicMock(), 3, FOO + BAR
args, kwargs = (FOO, BAR), {'a': 1, 'b': 2}
end_cb, cancel_cb = MagicMock(), MagicMock()

self.task_pool._task_groups = {group_name: 'causes error'}
with self.assertRaises(exceptions.InvalidGroupName):
self.task_pool.apply(mock_func, args, kwargs, num, group_name, end_cb, cancel_cb)
mock__check_start.assert_called_once_with(function=mock_func)
mock__apply_spawner.assert_not_called()
mock_create_task.assert_not_called()

mock__check_start.reset_mock()
self.task_pool._task_groups = {}

def check_assertions(_group_name, _output):
self.assertEqual(_group_name, _output)
mock__check_start.assert_called_once_with(function=mock_func)
self.assertEqual(mock_group_reg, self.task_pool._task_groups[_group_name])
mock__apply_spawner.assert_called_once_with(_group_name, mock_func, args, kwargs, num,
end_callback=end_cb, cancel_callback=cancel_cb)
mock_group_reg.__aenter__.assert_awaited_once_with()
mock__apply_num.assert_called_once_with(_group_name, mock_func, args, kwargs, num, end_cb, cancel_cb)
mock_create_task.assert_called_once_with(mock_apply_coroutine)
self.assertSetEqual({fake_task}, self.task_pool._group_meta_tasks_running[group_name])
mock_group_reg.__aexit__.assert_awaited_once()
mock_task_future.assert_awaited_once_with()

output = self.task_pool.apply(mock_func, args, kwargs, num, group_name, end_cb, cancel_cb)
output = await self.task_pool.apply(mock_func, args, kwargs, num, group_name, end_cb, cancel_cb)
check_assertions(group_name, output)
mock__generate_group_name.assert_not_called()

mock__check_start.reset_mock()
self.task_pool._task_groups.clear()
mock__apply_spawner.reset_mock()
mock_group_reg.__aenter__.reset_mock()
mock__apply_num.reset_mock()
mock_create_task.reset_mock()
mock_group_reg.__aexit__.reset_mock()
mock_task_future = AsyncMock()
mock_create_task.return_value = mock_task_future()

output = self.task_pool.apply(mock_func, args, kwargs, num, None, end_cb, cancel_cb)
output = await self.task_pool.apply(mock_func, args, kwargs, num, None, end_cb, cancel_cb)
check_assertions(generated_name, output)
mock__generate_group_name.assert_called_once_with('apply', mock_func)
|
||||
|
||||
@ -574,20 +572,20 @@ class TaskPoolTestCase(CommonTestCase):
|
||||
n = 2
|
||||
mock_semaphore_cls.return_value = semaphore = Semaphore(n)
|
||||
mock__get_map_end_callback.return_value = map_cb = MagicMock()
|
||||
awaitable1, awaitable2 = 'totally an awaitable', object()
|
||||
mock_star_function.side_effect = [awaitable1, Exception(), awaitable2]
|
||||
awaitable = 'totally an awaitable'
|
||||
mock_star_function.side_effect = [awaitable, Exception(), awaitable]
|
||||
arg1, arg2, bad = 123456789, 'function argument', None
|
||||
args = [arg1, bad, arg2]
|
||||
grp_name, mock_func, stars = 'whatever', MagicMock(__name__="mock"), 3
|
||||
group_name, mock_func, stars = 'whatever', MagicMock(__name__="mock"), 3
|
||||
end_cb, cancel_cb = MagicMock(), MagicMock()
|
||||
self.assertIsNone(await self.task_pool._arg_consumer(grp_name, n, mock_func, args, stars, end_cb, cancel_cb))
|
||||
# We initialized the semaphore with a value of 2. It should have been acquired twice. We expect it be locked.
|
||||
self.assertIsNone(await self.task_pool._arg_consumer(group_name, n, mock_func, args, stars, end_cb, cancel_cb))
|
||||
# We expect the semaphore to be acquired 2 times, then be released once after the exception occurs, then
|
||||
# acquired once more is reached. Since we initialized it with a value of 2, we expect it be locked.
|
||||
self.assertTrue(semaphore.locked())
|
||||
mock_semaphore_cls.assert_called_once_with(n)
|
||||
mock__get_map_end_callback.assert_called_once_with(semaphore, actual_end_callback=end_cb)
|
||||
mock__start_task.assert_has_awaits([
|
||||
call(awaitable1, group_name=grp_name, ignore_lock=True, end_callback=map_cb, cancel_callback=cancel_cb),
|
||||
call(awaitable2, group_name=grp_name, ignore_lock=True, end_callback=map_cb, cancel_callback=cancel_cb),
|
||||
mock__start_task.assert_has_awaits(2 * [
|
||||
call(awaitable, group_name=group_name, ignore_lock=True, end_callback=map_cb, cancel_callback=cancel_cb)
|
||||
])
|
||||
mock_star_function.assert_has_calls([
|
||||
call(mock_func, arg1, arg_stars=stars),
|
||||
@ -598,56 +596,23 @@ class TaskPoolTestCase(CommonTestCase):
mock_semaphore_cls.reset_mock()
mock__get_map_end_callback.reset_mock()
mock__start_task.reset_mock()
mock_star_function.reset_mock(side_effect=True)
mock_star_function.reset_mock()

# With a CancelledError thrown while acquiring the semaphore:
mock_acquire = AsyncMock(side_effect=[True, CancelledError])
mock_semaphore_cls.return_value = mock_semaphore = MagicMock(acquire=mock_acquire)
mock_star_function.return_value = mock_coroutine = MagicMock()
arg_it = iter(arg for arg in (arg1, arg2, FOO))
self.assertIsNone(await self.task_pool._arg_consumer(grp_name, n, mock_func, arg_it, stars, end_cb, cancel_cb))
# With a CancelledError thrown while starting a task:
mock_semaphore_cls.return_value = semaphore = Semaphore(1)
mock_star_function.side_effect = CancelledError()
self.assertIsNone(await self.task_pool._arg_consumer(group_name, n, mock_func, args, stars, end_cb, cancel_cb))
self.assertFalse(semaphore.locked())
mock_semaphore_cls.assert_called_once_with(n)
mock__get_map_end_callback.assert_called_once_with(mock_semaphore, actual_end_callback=end_cb)
mock_star_function.assert_has_calls([
call(mock_func, arg1, arg_stars=stars),
call(mock_func, arg2, arg_stars=stars)
])
mock_acquire.assert_has_awaits([call(), call()])
mock__start_task.assert_awaited_once_with(mock_coroutine, group_name=grp_name, ignore_lock=True,
end_callback=map_cb, cancel_callback=cancel_cb)
mock_coroutine.close.assert_called_once_with()
mock_semaphore.release.assert_not_called()
self.assertEqual(FOO, next(arg_it))

mock_acquire.reset_mock(side_effect=True)
mock_semaphore_cls.reset_mock()
mock__get_map_end_callback.reset_mock()
mock__start_task.reset_mock()
mock_star_function.reset_mock(side_effect=True)

# With a CancelledError thrown while starting the task:
mock__start_task.side_effect = [None, CancelledError]
arg_it = iter(arg for arg in (arg1, arg2, FOO))
self.assertIsNone(await self.task_pool._arg_consumer(grp_name, n, mock_func, arg_it, stars, end_cb, cancel_cb))
mock_semaphore_cls.assert_called_once_with(n)
mock__get_map_end_callback.assert_called_once_with(mock_semaphore, actual_end_callback=end_cb)
mock_star_function.assert_has_calls([
call(mock_func, arg1, arg_stars=stars),
call(mock_func, arg2, arg_stars=stars)
])
mock_acquire.assert_has_awaits([call(), call()])
mock__start_task.assert_has_awaits(2 * [
call(mock_coroutine, group_name=grp_name, ignore_lock=True, end_callback=map_cb, cancel_callback=cancel_cb)
])
mock_coroutine.close.assert_called_once_with()
mock_semaphore.release.assert_called_once_with()
self.assertEqual(FOO, next(arg_it))
mock__get_map_end_callback.assert_called_once_with(semaphore, actual_end_callback=end_cb)
mock__start_task.assert_not_called()
mock_star_function.assert_called_once_with(mock_func, arg1, arg_stars=stars)

@patch.object(pool, 'create_task')
@patch.object(pool.TaskPool, '_arg_consumer', new_callable=MagicMock)
@patch.object(pool, 'TaskGroupRegister')
@patch.object(pool.BaseTaskPool, '_check_start')
def test__map(self, mock__check_start: MagicMock, mock_reg_cls: MagicMock, mock__arg_consumer: MagicMock,
async def test__map(self, mock__check_start: MagicMock, mock_reg_cls: MagicMock, mock__arg_consumer: MagicMock,
mock_create_task: MagicMock):
mock_group_reg = set_up_mock_group_register(mock_reg_cls)
mock__arg_consumer.return_value = fake_consumer = object()
@ -658,7 +623,7 @@ class TaskPoolTestCase(CommonTestCase):
end_cb, cancel_cb = MagicMock(), MagicMock()

with self.assertRaises(ValueError):
self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb)
await self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb)
mock__check_start.assert_called_once_with(function=func)

mock__check_start.reset_mock()
@ -667,79 +632,81 @@ class TaskPoolTestCase(CommonTestCase):
self.task_pool._task_groups = {group_name: MagicMock()}

with self.assertRaises(exceptions.InvalidGroupName):
self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb)
await self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb)
mock__check_start.assert_called_once_with(function=func)

mock__check_start.reset_mock()

self.task_pool._task_groups.clear()

self.assertIsNone(self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb))
self.assertIsNone(await self.task_pool._map(group_name, n, func, arg_iter, stars, end_cb, cancel_cb))
mock__check_start.assert_called_once_with(function=func)
mock_reg_cls.assert_called_once_with()
self.task_pool._task_groups[group_name] = mock_group_reg
mock_group_reg.__aenter__.assert_awaited_once_with()
mock__arg_consumer.assert_called_once_with(group_name, n, func, arg_iter, stars,
end_callback=end_cb, cancel_callback=cancel_cb)
mock_create_task.assert_called_once_with(fake_consumer)
self.assertSetEqual({fake_task}, self.task_pool._group_meta_tasks_running[group_name])
mock_group_reg.__aexit__.assert_awaited_once()

@patch.object(pool.TaskPool, '_map')
@patch.object(pool.TaskPool, '_generate_group_name')
def test_map(self, mock__generate_group_name: MagicMock, mock__map: MagicMock):
async def test_map(self, mock__generate_group_name: MagicMock, mock__map: AsyncMock):
mock__generate_group_name.return_value = generated_name = 'name 1 2 3'
mock_func = MagicMock()
arg_iter, num_concurrent, group_name = (FOO, BAR, 1, 2, 3), 2, FOO + BAR
end_cb, cancel_cb = MagicMock(), MagicMock()
output = self.task_pool.map(mock_func, arg_iter, num_concurrent, group_name, end_cb, cancel_cb)
output = await self.task_pool.map(mock_func, arg_iter, num_concurrent, group_name, end_cb, cancel_cb)
self.assertEqual(group_name, output)
mock__map.assert_called_once_with(group_name, num_concurrent, mock_func, arg_iter, 0,
mock__map.assert_awaited_once_with(group_name, num_concurrent, mock_func, arg_iter, 0,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_not_called()

mock__map.reset_mock()
output = self.task_pool.map(mock_func, arg_iter, num_concurrent, None, end_cb, cancel_cb)
output = await self.task_pool.map(mock_func, arg_iter, num_concurrent, None, end_cb, cancel_cb)
self.assertEqual(generated_name, output)
mock__map.assert_called_once_with(generated_name, num_concurrent, mock_func, arg_iter, 0,
mock__map.assert_awaited_once_with(generated_name, num_concurrent, mock_func, arg_iter, 0,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_called_once_with('map', mock_func)

@patch.object(pool.TaskPool, '_map')
@patch.object(pool.TaskPool, '_generate_group_name')
def test_starmap(self, mock__generate_group_name: MagicMock, mock__map: MagicMock):
async def test_starmap(self, mock__generate_group_name: MagicMock, mock__map: AsyncMock):
mock__generate_group_name.return_value = generated_name = 'name 1 2 3'
mock_func = MagicMock()
args_iter, num_concurrent, group_name = ([FOO], [BAR]), 2, FOO + BAR
end_cb, cancel_cb = MagicMock(), MagicMock()
output = self.task_pool.starmap(mock_func, args_iter, num_concurrent, group_name, end_cb, cancel_cb)
output = await self.task_pool.starmap(mock_func, args_iter, num_concurrent, group_name, end_cb, cancel_cb)
self.assertEqual(group_name, output)
mock__map.assert_called_once_with(group_name, num_concurrent, mock_func, args_iter, 1,
mock__map.assert_awaited_once_with(group_name, num_concurrent, mock_func, args_iter, 1,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_not_called()

mock__map.reset_mock()
output = self.task_pool.starmap(mock_func, args_iter, num_concurrent, None, end_cb, cancel_cb)
output = await self.task_pool.starmap(mock_func, args_iter, num_concurrent, None, end_cb, cancel_cb)
self.assertEqual(generated_name, output)
mock__map.assert_called_once_with(generated_name, num_concurrent, mock_func, args_iter, 1,
mock__map.assert_awaited_once_with(generated_name, num_concurrent, mock_func, args_iter, 1,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_called_once_with('starmap', mock_func)

@patch.object(pool.TaskPool, '_map')
@patch.object(pool.TaskPool, '_generate_group_name')
async def test_doublestarmap(self, mock__generate_group_name: MagicMock, mock__map: MagicMock):
async def test_doublestarmap(self, mock__generate_group_name: MagicMock, mock__map: AsyncMock):
mock__generate_group_name.return_value = generated_name = 'name 1 2 3'
mock_func = MagicMock()
kw_iter, num_concurrent, group_name = [{'a': FOO}, {'a': BAR}], 2, FOO + BAR
end_cb, cancel_cb = MagicMock(), MagicMock()
output = self.task_pool.doublestarmap(mock_func, kw_iter, num_concurrent, group_name, end_cb, cancel_cb)
output = await self.task_pool.doublestarmap(mock_func, kw_iter, num_concurrent, group_name, end_cb, cancel_cb)
self.assertEqual(group_name, output)
mock__map.assert_called_once_with(group_name, num_concurrent, mock_func, kw_iter, 2,
mock__map.assert_awaited_once_with(group_name, num_concurrent, mock_func, kw_iter, 2,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_not_called()

mock__map.reset_mock()
output = self.task_pool.doublestarmap(mock_func, kw_iter, num_concurrent, None, end_cb, cancel_cb)
output = await self.task_pool.doublestarmap(mock_func, kw_iter, num_concurrent, None, end_cb, cancel_cb)
self.assertEqual(generated_name, output)
mock__map.assert_called_once_with(generated_name, num_concurrent, mock_func, kw_iter, 2,
mock__map.assert_awaited_once_with(generated_name, num_concurrent, mock_func, kw_iter, 2,
end_callback=end_cb, cancel_callback=cancel_cb)
mock__generate_group_name.assert_called_once_with('doublestarmap', mock_func)

@ -755,15 +722,13 @@ class SimpleTaskPoolTestCase(CommonTestCase):
TEST_POOL_CANCEL_CB = MagicMock()

def get_task_pool_init_params(self) -> dict:
params = super().get_task_pool_init_params()
params.update({
return super().get_task_pool_init_params() | {
'func': self.TEST_POOL_FUNC,
'args': self.TEST_POOL_ARGS,
'kwargs': self.TEST_POOL_KWARGS,
'end_callback': self.TEST_POOL_END_CB,
'cancel_callback': self.TEST_POOL_CANCEL_CB,
})
return params
}

def setUp(self) -> None:
self.base_class_init_patcher = patch.object(pool.BaseTaskPool, '__init__')
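The refactored `get_task_pool_init_params` swaps `dict.update` for the dictionary union operator, merging the base parameters and the subclass additions into a new dict in a single expression. A quick illustration (the keys here are made up; note that `|` between dicts requires Python 3.9 or later):

    base = {'pool_size': 100, 'name': None}    # illustrative keys only
    extra = {'name': 'test-pool'}
    merged = base | extra                      # right operand wins on key collisions
    assert merged == {'pool_size': 100, 'name': 'test-pool'}
    assert merged == {**base, **extra}         # equivalent pre-3.9 spelling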
@ -772,7 +737,6 @@ class SimpleTaskPoolTestCase(CommonTestCase):

def tearDown(self) -> None:
self.base_class_init_patcher.stop()
super().tearDown()

def test_init(self):
self.assertEqual(self.TEST_POOL_FUNC, self.task_pool._func)
@ -789,54 +753,23 @@ class SimpleTaskPoolTestCase(CommonTestCase):
self.assertEqual(self.TEST_POOL_FUNC.__name__, self.task_pool.func_name)

@patch.object(pool.SimpleTaskPool, '_start_task')
async def test__start_num(self, mock__start_task: AsyncMock):
group_name = FOO + BAR + 'abc'
mock_awaitable1, mock_awaitable2 = object(), object()
self.task_pool._func = MagicMock(side_effect=[mock_awaitable1, Exception(), mock_awaitable2], __name__='func')
num = 3
self.assertIsNone(await self.task_pool._start_num(num, group_name))
self.task_pool._func.assert_has_calls(num * [call(*self.task_pool._args, **self.task_pool._kwargs)])
call_kw = {
'group_name': group_name,
'end_callback': self.task_pool._end_callback,
'cancel_callback': self.task_pool._cancel_callback
}
mock__start_task.assert_has_awaits([call(mock_awaitable1, **call_kw), call(mock_awaitable2, **call_kw)])
async def test__start_one(self, mock__start_task: AsyncMock):
mock__start_task.return_value = expected_output = 99
self.task_pool._func = MagicMock(return_value=BAR)
output = await self.task_pool._start_one()
self.assertEqual(expected_output, output)
self.task_pool._func.assert_called_once_with(*self.task_pool._args, **self.task_pool._kwargs)
mock__start_task.assert_awaited_once_with(BAR, end_callback=self.task_pool._end_callback,
cancel_callback=self.task_pool._cancel_callback)

self.task_pool._func.reset_mock(side_effect=True)
mock__start_task.reset_mock()

# Simulate cancellation while the second task is being started.
mock__start_task.side_effect = [None, CancelledError, None]
mock_coroutine_to_close = MagicMock()
self.task_pool._func.side_effect = [mock_awaitable1, mock_coroutine_to_close, 'never called']
self.assertIsNone(await self.task_pool._start_num(num, group_name))
self.task_pool._func.assert_has_calls(2 * [call(*self.task_pool._args, **self.task_pool._kwargs)])
mock__start_task.assert_has_awaits([call(mock_awaitable1, **call_kw), call(mock_coroutine_to_close, **call_kw)])
mock_coroutine_to_close.close.assert_called_once_with()

@patch.object(pool, 'create_task')
@patch.object(pool.SimpleTaskPool, '_start_num', new_callable=MagicMock())
@patch.object(pool, 'TaskGroupRegister')
@patch.object(pool.BaseTaskPool, '_check_start')
def test_start(self, mock__check_start: MagicMock, mock_reg_cls: MagicMock, mock__start_num: AsyncMock,
mock_create_task: MagicMock):
mock_group_reg = set_up_mock_group_register(mock_reg_cls)
mock__start_num.return_value = mock_start_num_coroutine = object()
mock_create_task.return_value = fake_task = object()
self.task_pool._task_groups = {}
self.task_pool._group_meta_tasks_running = {}
@patch.object(pool.SimpleTaskPool, '_start_one')
async def test_start(self, mock__start_one: AsyncMock):
mock__start_one.return_value = FOO
num = 5
self.task_pool._start_calls = 42
expected_group_name = 'start-group-42'
output = self.task_pool.start(num)
self.assertEqual(expected_group_name, output)
mock__check_start.assert_called_once_with(function=self.TEST_POOL_FUNC)
self.assertEqual(43, self.task_pool._start_calls)
self.assertEqual(mock_group_reg, self.task_pool._task_groups[expected_group_name])
mock__start_num.assert_called_once_with(num, expected_group_name)
mock_create_task.assert_called_once_with(mock_start_num_coroutine)
self.assertSetEqual({fake_task}, self.task_pool._group_meta_tasks_running[expected_group_name])
output = await self.task_pool.start(num)
expected_output = num * [FOO]
self.assertListEqual(expected_output, output)
mock__start_one.assert_has_awaits(num * [call()])

@patch.object(pool.SimpleTaskPool, 'cancel')
def test_stop(self, mock_cancel: MagicMock):
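Read together, the rewritten test implies that `SimpleTaskPool.start` is now a coroutine: it awaits `_start_one` once per requested task and returns the collected results as a list (the IDs of the started tasks), instead of synchronously spawning a meta task and returning a generated group name. A hedged usage sketch, assuming a pool set up as in the examples below:

    task_ids = await pool.start(5)  # presumably returns the five new task IDs, e.g. [0, 1, 2, 3, 4]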
@ -39,11 +39,12 @@ async def work(n: int) -> None:

async def main() -> None:
pool = SimpleTaskPool(work, args=(5,)) # initializes the pool; no work is being done yet
pool.start(3) # launches work tasks 0, 1, and 2
await pool.start(3) # launches work tasks 0, 1, and 2
await asyncio.sleep(1.5) # lets the tasks work for a bit
pool.start(1) # launches work task 3
await pool.start(1) # launches work task 3
await asyncio.sleep(1.5) # lets the tasks work for a bit
pool.stop(2) # cancels tasks 3 and 2 (LIFO order)
pool.lock() # required for the last line
await pool.gather_and_close() # awaits all tasks, then flushes the pool

@ -122,7 +123,7 @@ async def main() -> None:
pool = TaskPool(3)
# Queue up two tasks (IDs 0 and 1) to run concurrently (with the same keyword-arguments).
print("> Called `apply`")
pool.apply(work, kwargs={'start': 100, 'stop': 200, 'step': 10}, num=2)
await pool.apply(work, kwargs={'start': 100, 'stop': 200, 'step': 10}, num=2)
# Let the tasks work for a bit.
await asyncio.sleep(1.5)
# Now, let us enqueue four more tasks (which will receive IDs 2, 3, 4, and 5), each created with different
@ -134,9 +135,11 @@ async def main() -> None:
# Once there is room in the pool again, the third and fourth will each start (with IDs 4 and 5)
# only once there is room in the pool and no more than one other task of these new ones is running.
args_list = [(0, 10), (10, 20), (20, 30), (30, 40)]
pool.starmap(other_work, args_list, num_concurrent=2)
await pool.starmap(other_work, args_list, num_concurrent=2)
print("> Called `starmap`")
# We block until all tasks have ended.
# Now we lock the pool, so that we can safely await all our tasks.
pool.lock()
# Finally, we block until all tasks have ended.
print("> Calling `gather_and_close`...")
await pool.gather_and_close()
print("> Done.")

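As with `start`, the map-family methods (`apply`, `map`, `starmap`, `doublestarmap`) are awaited at the call site after this change while still returning the task group name, judging by the tests above. A minimal sketch, reusing `pool` and `other_work` from the example:

    group = await pool.starmap(other_work, [(0, 10), (10, 20)], num_concurrent=2)
    print(f"> `starmap` registered tasks under group: {group}")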
@ -67,7 +67,7 @@ async def main() -> None:
for item in range(100):
q.put_nowait(item)
pool = SimpleTaskPool(worker, args=(q,)) # initializes the pool
pool.start(3) # launches three worker tasks
await pool.start(3) # launches three worker tasks
control_server_task = await TCPControlServer(pool, host='127.0.0.1', port=9999).serve_forever()
# We block until `.task_done()` has been called once by our workers for every item placed into the queue.
await q.join()
@ -75,6 +75,7 @@ async def main() -> None:
control_server_task.cancel()
# Since our workers should now be stuck waiting for more items to pick from the queue, but no items are left,
# we can now safely cancel their tasks.
pool.lock()
pool.stop_all()
# Finally, we allow for all tasks to do their cleanup (if they need to do any) upon being cancelled.
# We block until they all return or raise an exception, but since we are not interested in any of their exceptions,
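The last comment is cut off at the hunk boundary, but the shutdown order this example demonstrates is worth spelling out: drain the queue, stop the control server, lock the pool so no new tasks can start, cancel the now-idle workers, then gather so the cancelled tasks can run their cleanup. Condensed, using the same names as above:

    await q.join()                 # every queued item has been processed
    control_server_task.cancel()   # stop accepting control connections
    pool.lock()                    # no new tasks may be started from here on
    pool.stop_all()                # cancel the (now idle) worker tasks
    await pool.gather_and_close()  # let cancelled workers finish their cleanup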