generated from daniil-berg/boilerplate-py

Compare commits (8 commits):

- 410e73e68b
- 727f0b7c8b
- 63aab1a8f6
- d48b20818f
- 3c69740c8d
- 586023f722
- 16eda31648
- 99ece436de
@@ -1,6 +1,6 @@
 [metadata]
 name = asyncio-taskpool
-version = 0.0.3
+version = 0.1.7
 author = Daniil Fajnberg
 author_email = mail@daniil.fajnberg.de
 description = Dynamically manage pools of asyncio tasks
@@ -3,6 +3,6 @@ MSG_BYTES = 1024
 CMD_START = 'start'
 CMD_STOP = 'stop'
 CMD_STOP_ALL = 'stop_all'
-CMD_SIZE = 'size'
+CMD_NUM_RUNNING = 'num_running'
 CMD_FUNC = 'func'
 CLIENT_EXIT = 'exit'
@@ -1,4 +1,5 @@
 from asyncio.coroutines import iscoroutinefunction
+from asyncio.queues import Queue
 from typing import Any, Optional

 from .types import T, AnyCallableT, ArgsT, KwArgsT
@@ -22,3 +23,7 @@ def star_function(function: AnyCallableT, arg: Any, arg_stars: int = 0) -> T:
     if arg_stars == 2:
         return function(**arg)
     raise ValueError(f"Invalid argument arg_stars={arg_stars}; must be 0, 1, or 2.")
+
+
+async def join_queue(q: Queue) -> None:
+    await q.join()
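An editorial aside, not part of the diff: `star_function` is the dispatcher behind the `map`/`starmap`/`doublestarmap` methods further down, with `arg_stars` values 0, 1, and 2 corresponding to calling `func(arg)`, `func(*arg)`, and `func(**arg)` respectively. A minimal sketch of the three modes (the import path is inferred from the package name used elsewhere in this diff):

```python
# Hypothetical illustration of the three `arg_stars` modes.
from asyncio_taskpool.helpers import star_function


def add(a: int, b: int = 0) -> int:
    return a + b


assert star_function(add, 5) == 5                              # add(5)
assert star_function(add, (2, 3), arg_stars=1) == 5            # add(*(2, 3))
assert star_function(add, {'a': 2, 'b': 3}, arg_stars=2) == 5  # add(**{'a': 2, 'b': 3})
```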
@@ -3,13 +3,14 @@ from asyncio import gather
 from asyncio.coroutines import iscoroutine, iscoroutinefunction
 from asyncio.exceptions import CancelledError
 from asyncio.locks import Event, Semaphore
+from asyncio.queues import Queue, QueueEmpty
 from asyncio.tasks import Task, create_task
 from functools import partial
 from math import inf
 from typing import Any, Awaitable, Dict, Iterable, Iterator, List

 from . import exceptions
-from .helpers import execute_optional, star_function
+from .helpers import execute_optional, star_function, join_queue
 from .types import ArgsT, KwArgsT, CoroutineFunc, EndCallbackT, CancelCallbackT


@@ -39,8 +40,7 @@ class BaseTaskPool:
         self._num_ended: int = 0
         self._idx: int = self._add_pool(self)
         self._name: str = name
-        self._all_tasks_known_flag: Event = Event()
-        self._all_tasks_known_flag.set()
+        self._before_gathering: List[Awaitable] = []
         self._interrupt_flag: Event = Event()
         log.debug("%s initialized", str(self))

@@ -201,12 +201,13 @@ class BaseTaskPool:
                 If `True`, even if the pool is closed, the task will still be started.
             end_callback (optional):
                 A callback to execute after the task has ended.
-                It is run with the `task_id` as its only positional argument.
+                It is run with the task's ID as its only positional argument.
             cancel_callback (optional):
                 A callback to execute after cancellation of the task.
-                It is run with the `task_id` as its only positional argument.
+                It is run with the task's ID as its only positional argument.

         Raises:
             `asyncio_taskpool.exceptions.NotCoroutine` if `awaitable` is not a coroutine.
+            `asyncio_taskpool.exceptions.PoolIsClosed` if the pool has been closed and `ignore_closed` is `False`.
         """
         if not iscoroutine(awaitable):
@@ -296,7 +297,8 @@
             return_exceptions (optional): Passed directly into `gather`.
         """
         results = await gather(*self._ended.values(), *self._cancelled.values(), return_exceptions=return_exceptions)
-        self._ended = self._cancelled = {}
+        self._ended.clear()
+        self._cancelled.clear()
         if self._interrupt_flag.is_set():
             self._interrupt_flag.clear()
         return results
@@ -329,86 +331,370 @@ class BaseTaskPool:
         """
         if self._open:
             raise exceptions.PoolStillOpen("Pool must be closed, before tasks can be gathered")
-        await self._all_tasks_known_flag.wait()
+        await gather(*self._before_gathering)
         results = await gather(*self._ended.values(), *self._cancelled.values(), *self._running.values(),
                                return_exceptions=return_exceptions)
-        self._ended = self._cancelled = self._running = {}
+        self._ended.clear()
+        self._cancelled.clear()
+        self._running.clear()
+        self._before_gathering.clear()
         if self._interrupt_flag.is_set():
             self._interrupt_flag.clear()
         return results


 class TaskPool(BaseTaskPool):
     """
     General task pool class.
     Attempts to somewhat emulate part of the interface of `multiprocessing.pool.Pool` from the stdlib.

     A `TaskPool` instance can manage an arbitrary number of concurrent tasks from any coroutine function.
     Tasks in the pool can all belong to the same coroutine function,
     but they can also come from any number of different and unrelated coroutine functions.

     As long as there is room in the pool, more tasks can be added. (By default, there is no pool size limit.)
     Each task started in the pool receives a unique ID, which can be used to cancel specific tasks at any moment.

     Adding tasks blocks **only if** the pool is full at that moment.
     """

     async def _apply_one(self, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None,
                          end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> int:
         """
         Creates a coroutine with the supplied arguments and runs it as a new task in the pool.

         This method blocks, **only if** the pool is full.

         Args:
             func:
                 The coroutine function to be run as a task within the task pool.
             args (optional):
                 The positional arguments to pass into the function call.
             kwargs (optional):
                 The keyword-arguments to pass into the function call.
             end_callback (optional):
                 A callback to execute after the task has ended.
                 It is run with the task's ID as its only positional argument.
             cancel_callback (optional):
                 A callback to execute after cancellation of the task.
                 It is run with the task's ID as its only positional argument.

         Returns:
             The newly spawned task's ID within the pool.
         """
         if kwargs is None:
             kwargs = {}
         return await self._start_task(func(*args, **kwargs), end_callback=end_callback, cancel_callback=cancel_callback)

     async def apply(self, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None, num: int = 1,
                     end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> List[int]:
         """
         Creates an arbitrary number of coroutines with the supplied arguments and runs them as new tasks in the pool.
         Each coroutine looks like `func(*args, **kwargs)`.

         This method blocks, **only if** there is not enough room in the pool for the desired number of new tasks.

         Args:
             func:
                 The coroutine function to use for spawning the new tasks within the task pool.
             args (optional):
                 The positional arguments to pass into each function call.
             kwargs (optional):
                 The keyword-arguments to pass into each function call.
             num (optional):
                 The number of tasks to spawn with the specified parameters.
             end_callback (optional):
                 A callback to execute after a task has ended.
                 It is run with the task's ID as its only positional argument.
             cancel_callback (optional):
                 A callback to execute after cancellation of a task.
                 It is run with the task's ID as its only positional argument.

         Returns:
             The newly spawned tasks' IDs within the pool as a list of integers.

         Raises:
             `NotCoroutine` if `func` is not a coroutine function.
             `PoolIsClosed` if the pool has been closed already.
         """
         ids = await gather(*(self._apply_one(func, args, kwargs, end_callback, cancel_callback) for _ in range(num)))
         # TODO: for some reason PyCharm wrongly claims that `gather` returns a tuple of exceptions
         assert isinstance(ids, list)
         return ids

-    async def _next_callback(self, task_id: int, func: CoroutineFunc, args_iter: Iterator[Any], arg_stars: int = 0,
-                             end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
-        reached_end = await self._start_next_task(func, args_iter, arg_stars=arg_stars,
-                                                  end_callback=end_callback, cancel_callback=cancel_callback)
-        if reached_end:
-            self._all_tasks_known_flag.set()
-        await execute_optional(end_callback, args=(task_id,))
+    async def _queue_producer(self, q: Queue, args_iter: Iterator[Any]) -> None:
+        """
+        Keeps the arguments queue from `_map()` full as long as the iterator has elements.
+        If the `_interrupt_flag` gets set, the loop ends prematurely.

-    async def _start_next_task(self, func: CoroutineFunc, args_iter: Iterator[Any], arg_stars: int = 0,
-                               end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> bool:
-        if self._interrupt_flag.is_set():
-            return True
+        Args:
+            q:
+                The queue of function arguments to consume for starting the next task.
+            args_iter:
+                The iterator of function arguments to put into the queue.
+        """
+        for arg in args_iter:
+            if self._interrupt_flag.is_set():
+                break
+            await q.put(arg)  # This blocks as long as the queue is full.

+    async def _queue_consumer(self, q: Queue, first_batch_started: Event, func: CoroutineFunc, arg_stars: int = 0,
+                              end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
+        """
+        Wrapper around the `_start_task()` taking the next element from the arguments queue set up in `_map()`.
+        Partially constructs the `_queue_callback` function with the same arguments.
+
+        Args:
+            q:
+                The queue of function arguments to consume for starting the next task.
+            first_batch_started:
+                The event flag to wait for, before launching the next consumer.
+                It can only be set by the `_map()` method, which happens after the first batch of tasks has been started.
+            func:
+                The coroutine function to use for spawning the tasks within the task pool.
+            arg_stars (optional):
+                Whether or not to unpack an element from `q` using stars; must be 0, 1, or 2.
+            end_callback (optional):
+                The actual callback specified to execute after the task (and the next one) has ended.
+                It is run with the task's ID as its only positional argument.
+            cancel_callback (optional):
+                The callback that was specified to execute after cancellation of the task (and the next one).
+                It is run with the task's ID as its only positional argument.
+        """
+        try:
+            arg = q.get_nowait()
+        except QueueEmpty:
+            return
         try:
             await self._start_task(
-                star_function(func, next(args_iter), arg_stars=arg_stars),
+                star_function(func, arg, arg_stars=arg_stars),
                 ignore_closed=True,
-                end_callback=partial(TaskPool._next_callback, self, func=func, args_iter=args_iter, arg_stars=arg_stars,
-                                     end_callback=end_callback, cancel_callback=cancel_callback),
+                end_callback=partial(TaskPool._queue_callback, self, q=q, first_batch_started=first_batch_started,
+                                     func=func, arg_stars=arg_stars, end_callback=end_callback,
+                                     cancel_callback=cancel_callback),
                 cancel_callback=cancel_callback
             )
+        finally:
+            q.task_done()

+    async def _queue_callback(self, task_id: int, q: Queue, first_batch_started: Event, func: CoroutineFunc,
+                              arg_stars: int = 0, end_callback: EndCallbackT = None,
+                              cancel_callback: CancelCallbackT = None) -> None:
+        """
+        Wrapper around an end callback function passed into the `_map()` method.
+        Triggers the next `_queue_consumer` with the same arguments.
+
+        Args:
+            task_id:
+                The ID of the ending task.
+            q:
+                The queue of function arguments to consume for starting the next task.
+            first_batch_started:
+                The event flag to wait for, before launching the next consumer.
+                It can only be set by the `_map()` method, which happens after the first batch of tasks has been started.
+            func:
+                The coroutine function to use for spawning the tasks within the task pool.
+            arg_stars (optional):
+                Whether or not to unpack an element from `q` using stars; must be 0, 1, or 2.
+            end_callback (optional):
+                The actual callback specified to execute after the task (and the next one) has ended.
+                It is run with the `task_id` as its only positional argument.
+            cancel_callback (optional):
+                The callback that was specified to execute after cancellation of the task (and the next one).
+                It is run with the `task_id` as its only positional argument.
+        """
+        await first_batch_started.wait()
+        await self._queue_consumer(q, first_batch_started, func, arg_stars,
+                                   end_callback=end_callback, cancel_callback=cancel_callback)
+        await execute_optional(end_callback, args=(task_id,))
+
+    def _set_up_args_queue(self, args_iter: ArgsT, num_tasks: int) -> Queue:
+        """
+        Helper function for `_map()`.
+        Takes the iterable of function arguments `args_iter` and adds up to `num_tasks` to a new `asyncio.Queue`.
+        The queue's `join()` method is added to the pool's `_before_gathering` list and the queue is returned.
+
+        If the iterable contains less than `num_tasks` elements, nothing else happens; otherwise the `_queue_producer`
+        is started as a separate task with the arguments queue and an iterator of the remaining arguments.
+
+        Args:
+            args_iter:
+                The iterable of function arguments passed into `_map()` to use for creating the new tasks.
+            num_tasks:
+                The maximum number of the new tasks to run concurrently that was passed into `_map()`.
+
+        Returns:
+            The newly created and filled arguments queue for spawning new tasks.
+        """
+        # Setting the `maxsize` of the queue to `num_tasks` will ensure that no more than `num_tasks` tasks will run
+        # concurrently because the size of the queue is what will determine the number of immediately started tasks in
+        # the `_map()` method and each of those will only ever start (at most) one other task upon ending.
+        args_queue = Queue(maxsize=num_tasks)
+        self._before_gathering.append(join_queue(args_queue))
+        args_iter = iter(args_iter)
+        try:
+            # Here we guarantee that the queue will contain as many arguments as needed for starting the first batch of
+            # tasks, which will be at most `num_tasks` (meaning the queue will be full).
+            for i in range(num_tasks):
+                args_queue.put_nowait(next(args_iter))
         except StopIteration:
-            return True
-        return False
+            # If we get here, this means that the number of elements in the arguments iterator was less than the
+            # specified `num_tasks`. Still, the number of tasks to start immediately will be the size of the queue.
+            # The `_queue_producer` won't be necessary, since we already put all the elements in the queue.
+            pass
+        else:
+            # There may be more elements in the arguments iterator, so we need the `_queue_producer`.
+            # It will have exclusive access to the `args_iter` from now on.
+            # Since the queue is full already, it will wait until one of the tasks in the first batch ends,
+            # before putting the next item in it.
+            create_task(self._queue_producer(args_queue, args_iter))
+        return args_queue

     async def _map(self, func: CoroutineFunc, args_iter: ArgsT, arg_stars: int = 0, num_tasks: int = 1,
                    end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
         """
         Creates coroutines with arguments from a supplied iterable and runs them as new tasks in the pool in batches.
+        TODO: If task groups are implemented, consider adding all tasks from one call of this method to the same group
+              and referring to "group size" rather than chunk/batch size.
         Each coroutine looks like `func(arg)`, `func(*arg)`, or `func(**arg)`, `arg` being an element from the iterable.

         This method blocks, **only if** there is not enough room in the pool for the first batch of new tasks.

+        It sets up an internal arguments queue which is continuously filled while consuming the arguments iterable.
+
         Args:
             func:
                 The coroutine function to use for spawning the new tasks within the task pool.
             args_iter:
                 The iterable of arguments; each element is to be passed into a `func` call when spawning a new task.
             arg_stars (optional):
                 Whether or not to unpack an element from `args_iter` using stars; must be 0, 1, or 2.
             num_tasks (optional):
                 The maximum number of the new tasks to run concurrently.
             end_callback (optional):
                 A callback to execute after a task has ended.
                 It is run with the task's ID as its only positional argument.
             cancel_callback (optional):
                 A callback to execute after cancellation of a task.
                 It is run with the task's ID as its only positional argument.

         Raises:
             `asyncio_taskpool.exceptions.PoolIsClosed` if the pool has been closed.
         """
         if not self.is_open:
             raise exceptions.PoolIsClosed("Cannot start new tasks")
-        if self._all_tasks_known_flag.is_set():
-            self._all_tasks_known_flag.clear()
-        args_iter = iter(args_iter)
-        for _ in range(num_tasks):
-            reached_end = await self._start_next_task(func, args_iter, arg_stars, end_callback, cancel_callback)
-            if reached_end:
-                self._all_tasks_known_flag.set()
-                break
+        args_queue = self._set_up_args_queue(args_iter, num_tasks)
+        # We need a flag to ensure that starting all tasks from the first batch here will not be blocked by the
+        # `_queue_callback` triggered by one or more of them.
+        # This could happen, e.g. if the pool has just enough room for one more task, but the queue here contains more
+        # than one element, and the pool remains full until after the first task of the first batch ends. Then the
+        # callback might trigger the next `_queue_consumer` before this method can, which will keep it blocked.
+        first_batch_started = Event()
+        for _ in range(args_queue.qsize()):
+            # This is where blocking can occur, if the pool is full.
+            await self._queue_consumer(args_queue, first_batch_started, func,
+                                       arg_stars=arg_stars, end_callback=end_callback, cancel_callback=cancel_callback)
+        # Now the callbacks can immediately trigger more tasks.
+        first_batch_started.set()

-    async def map(self, func: CoroutineFunc, args_iter: ArgsT, num_tasks: int = 1,
+    async def map(self, func: CoroutineFunc, arg_iter: ArgsT, num_tasks: int = 1,
                   end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
-        await self._map(func, args_iter, arg_stars=0, num_tasks=num_tasks,
+        """
+        An asyncio-task-based equivalent of the `multiprocessing.pool.Pool.map` method.
+
+        Creates coroutines with arguments from a supplied iterable and runs them as new tasks in the pool in batches.
+        Each coroutine looks like `func(arg)`, `arg` being an element from the iterable.
+
+        Once the first batch of tasks has started to run, this method returns.
+        As soon as one of them finishes, it triggers the start of a new task (assuming there is room in the pool)
+        consuming the next element from the arguments iterable.
+        If the size of the pool never imposes a limit, this ensures that there is almost continuously the desired number
+        of tasks from this call concurrently running within the pool.
+
+        This method blocks, **only if** there is not enough room in the pool for the first batch of new tasks.
+
+        Args:
+            func:
+                The coroutine function to use for spawning the new tasks within the task pool.
+            arg_iter:
+                The iterable of arguments; each argument is to be passed into a `func` call when spawning a new task.
+            num_tasks (optional):
+                The maximum number of the new tasks to run concurrently.
+            end_callback (optional):
+                A callback to execute after a task has ended.
+                It is run with the task's ID as its only positional argument.
+            cancel_callback (optional):
+                A callback to execute after cancellation of a task.
+                It is run with the task's ID as its only positional argument.
+
+        Raises:
+            `PoolIsClosed` if the pool has been closed.
+            `NotCoroutine` if `func` is not a coroutine function.
+        """
+        await self._map(func, arg_iter, arg_stars=0, num_tasks=num_tasks,
                         end_callback=end_callback, cancel_callback=cancel_callback)

     async def starmap(self, func: CoroutineFunc, args_iter: Iterable[ArgsT], num_tasks: int = 1,
                       end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
         """
         Like `map()` except that the elements of `args_iter` are expected to be iterables themselves to be unpacked as
         positional arguments to the function.
         Each coroutine then looks like `func(*arg)`, `arg` being an element from `args_iter`.
         """
         await self._map(func, args_iter, arg_stars=1, num_tasks=num_tasks,
                         end_callback=end_callback, cancel_callback=cancel_callback)

     async def doublestarmap(self, func: CoroutineFunc, kwargs_iter: Iterable[KwArgsT], num_tasks: int = 1,
                             end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None) -> None:
         """
         Like `map()` except that the elements of `kwargs_iter` are expected to be mappings to be unpacked as
         keyword-arguments to the function.
         Each coroutine then looks like `func(**arg)`, `arg` being an element from `kwargs_iter`.
         """
         await self._map(func, kwargs_iter, arg_stars=2, num_tasks=num_tasks,
                         end_callback=end_callback, cancel_callback=cancel_callback)


 class SimpleTaskPool(BaseTaskPool):
     """
     Simplified task pool class.

     A `SimpleTaskPool` instance can manage an arbitrary number of concurrent tasks,
     but they **must** come from a single coroutine function, called with the same arguments.

     The coroutine function and its arguments are defined upon initialization.

     As long as there is room in the pool, more tasks can be added. (By default, there is no pool size limit.)
     Each task started in the pool receives a unique ID, which can be used to cancel specific tasks at any moment.
     However, since all tasks come from the same function-arguments-combination, the specificity of the `cancel()`
     method is probably unnecessary. Instead, a simpler `stop()` method is introduced.

     Adding tasks blocks **only if** the pool is full at that moment.
     """

     def __init__(self, func: CoroutineFunc, args: ArgsT = (), kwargs: KwArgsT = None,
                  end_callback: EndCallbackT = None, cancel_callback: CancelCallbackT = None,
-                 name: str = None) -> None:
+                 pool_size: int = inf, name: str = None) -> None:
         """

         Args:
             func:
                 The function to use for spawning new tasks within the pool.
             args (optional):
                 The positional arguments to pass into each function call.
             kwargs (optional):
                 The keyword-arguments to pass into each function call.
             end_callback (optional):
                 A callback to execute after a task has ended.
                 It is run with the task's ID as its only positional argument.
             cancel_callback (optional):
                 A callback to execute after cancellation of a task.
                 It is run with the task's ID as its only positional argument.
+            pool_size (optional):
+                The maximum number of tasks allowed to run concurrently in the pool.
             name (optional):
                 An optional name for the pool.
         """
         if not iscoroutinefunction(func):
             raise exceptions.NotCoroutine(f"Not a coroutine function: {func}")
         self._func: CoroutineFunc = func
@@ -416,32 +702,39 @@ class SimpleTaskPool(BaseTaskPool):
         self._kwargs: KwArgsT = kwargs if kwargs is not None else {}
         self._end_callback: EndCallbackT = end_callback
         self._cancel_callback: CancelCallbackT = cancel_callback
-        super().__init__(name=name)
+        super().__init__(pool_size=pool_size, name=name)

     @property
     def func_name(self) -> str:
         """Returns the name of the coroutine function used in the pool."""
         return self._func.__name__

-    @property
-    def size(self) -> int:
-        return self.num_running

     async def _start_one(self) -> int:
         """Starts a single new task within the pool and returns its ID."""
         return await self._start_task(self._func(*self._args, **self._kwargs),
                                       end_callback=self._end_callback, cancel_callback=self._cancel_callback)

     async def start(self, num: int = 1) -> List[int]:
-        return [await self._start_one() for _ in range(num)]
+        """Starts `num` new tasks within the pool and returns their IDs as a list."""
+        ids = await gather(*(self._start_one() for _ in range(num)))
+        assert isinstance(ids, list)  # for PyCharm (see above to-do-item)
+        return ids

     def stop(self, num: int = 1) -> List[int]:
-        num = min(num, self.size)
+        """
+        Cancels `num` running tasks within the pool and returns their IDs as a list.
+
+        The tasks are canceled in LIFO order, meaning tasks started later will be stopped before those started earlier.
+        If `num` is greater than or equal to the number of currently running tasks, naturally all tasks are cancelled.
+        """
         ids = []
         for i, task_id in enumerate(reversed(self._running)):
             if i >= num:
-                break
+                break  # We got the desired number of task IDs, there may well be more tasks left to keep running
             ids.append(task_id)
         self.cancel(*ids)
         return ids

     def stop_all(self) -> List[int]:
-        return self.stop(self.size)
+        """Cancels all running tasks and returns their IDs as a list."""
+        return self.stop(self.num_running)
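An editorial aside, not part of the diff: the batching behavior documented in `map()` above can be exercised roughly as follows; the worker body and all concrete values here are made up for illustration:

```python
import asyncio

from asyncio_taskpool.pool import TaskPool


async def work(n: int) -> None:
    # Made-up worker: pretend `n` takes a moment to process.
    await asyncio.sleep(0.1)
    print("processed", n)


async def main() -> None:
    pool = TaskPool()  # by default there is no pool size limit
    # Returns once the first batch of (at most) 2 tasks has started running;
    # each finishing task then pulls the next element off the internal arguments queue.
    await pool.map(work, range(10), num_tasks=2)
    pool.close()         # no new tasks may be started from now on
    await pool.gather()  # waits for the queue to drain and all tasks to end


asyncio.run(main())
```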
@@ -63,8 +63,8 @@ class ControlServer(ABC):  # TODO: Implement interface for normal TaskPool insta
         writer.write(str(self._pool.stop_all()).encode())

     def _pool_size(self, writer: StreamWriter) -> None:
-        log.debug("%s requests pool size", self.client_class.__name__)
-        writer.write(str(self._pool.size).encode())
+        log.debug("%s requests number of running tasks", self.client_class.__name__)
+        writer.write(str(self._pool.num_running).encode())

     def _pool_func(self, writer: StreamWriter) -> None:
         log.debug("%s requests pool function", self.client_class.__name__)
@@ -83,7 +83,7 @@ class ControlServer(ABC):  # TODO: Implement interface for normal TaskPool insta
             self._stop_tasks(writer, arg)
         elif cmd == constants.CMD_STOP_ALL:
             self._stop_all_tasks(writer)
-        elif cmd == constants.CMD_SIZE:
+        elif cmd == constants.CMD_NUM_RUNNING:
             self._pool_size(writer)
         elif cmd == constants.CMD_FUNC:
             self._pool_func(writer)
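For context, not part of the diff: a control client sends one of the command strings from `constants` over the stream and reads back the encoded reply. A hypothetical round-trip for the renamed command, assuming a TCP control server on a made-up host and port:

```python
import asyncio

MSG_BYTES = 1024  # mirrors asyncio_taskpool.constants.MSG_BYTES


async def query_num_running(host: str = '127.0.0.1', port: int = 8001) -> str:
    """Sends CMD_NUM_RUNNING ('num_running') and returns the server's reply."""
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(b'num_running')
    await writer.drain()
    reply = await reader.read(MSG_BYTES)
    writer.close()
    await writer.wait_closed()
    return reply.decode()
```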
@@ -1,7 +1,9 @@
 import asyncio
 from asyncio.exceptions import CancelledError
+from asyncio.queues import Queue
 from unittest import IsolatedAsyncioTestCase
 from unittest.mock import PropertyMock, MagicMock, AsyncMock, patch, call
+from typing import Type

 from asyncio_taskpool import pool, exceptions

@@ -14,7 +16,12 @@ class TestException(Exception):
     pass


-class BaseTaskPoolTestCase(IsolatedAsyncioTestCase):
+class CommonTestCase(IsolatedAsyncioTestCase):
+    TEST_CLASS: Type[pool.BaseTaskPool] = pool.BaseTaskPool
+    TEST_POOL_SIZE: int = 420
+    TEST_POOL_NAME: str = 'test123'
+
+    task_pool: pool.BaseTaskPool
+    log_lvl: int

     @classmethod
@@ -26,35 +33,38 @@ class BaseTaskPoolTestCase(IsolatedAsyncioTestCase):
     def tearDownClass(cls) -> None:
         pool.log.setLevel(cls.log_lvl)

-    def setUp(self) -> None:
-        self._pools = getattr(pool.BaseTaskPool, '_pools')
+    def get_task_pool_init_params(self) -> dict:
+        return {'pool_size': self.TEST_POOL_SIZE, 'name': self.TEST_POOL_NAME}

-        # These three methods are called during initialization, so we mock them by default during setup
-        self._add_pool_patcher = patch.object(pool.BaseTaskPool, '_add_pool')
-        self.pool_size_patcher = patch.object(pool.BaseTaskPool, 'pool_size', new_callable=PropertyMock)
-        self.__str___patcher = patch.object(pool.BaseTaskPool, '__str__')
+    def setUp(self) -> None:
+        self._pools = self.TEST_CLASS._pools
+        # These three methods are called during initialization, so we mock them by default during setup:
+        self._add_pool_patcher = patch.object(self.TEST_CLASS, '_add_pool')
+        self.pool_size_patcher = patch.object(self.TEST_CLASS, 'pool_size', new_callable=PropertyMock)
+        self.dunder_str_patcher = patch.object(self.TEST_CLASS, '__str__')
         self.mock__add_pool = self._add_pool_patcher.start()
         self.mock_pool_size = self.pool_size_patcher.start()
-        self.mock___str__ = self.__str___patcher.start()
+        self.mock___str__ = self.dunder_str_patcher.start()
         self.mock__add_pool.return_value = self.mock_idx = 123
         self.mock___str__.return_value = self.mock_str = 'foobar'

-        # Test pool parameters:
-        self.test_pool_size, self.test_pool_name = 420, 'test123'
-        self.task_pool = pool.BaseTaskPool(pool_size=self.test_pool_size, name=self.test_pool_name)
+        self.task_pool = self.TEST_CLASS(**self.get_task_pool_init_params())

     def tearDown(self) -> None:
-        setattr(pool.TaskPool, '_pools', self._pools)
+        self.TEST_CLASS._pools.clear()
         self._add_pool_patcher.stop()
         self.pool_size_patcher.stop()
-        self.__str___patcher.stop()
+        self.dunder_str_patcher.stop()


+class BaseTaskPoolTestCase(CommonTestCase):

     def test__add_pool(self):
         self.assertListEqual(EMPTY_LIST, self._pools)
         self._add_pool_patcher.stop()
-        output = pool.TaskPool._add_pool(self.task_pool)
+        output = pool.BaseTaskPool._add_pool(self.task_pool)
         self.assertEqual(0, output)
-        self.assertListEqual([self.task_pool], getattr(pool.TaskPool, '_pools'))
+        self.assertListEqual([self.task_pool], pool.BaseTaskPool._pools)

     def test_init(self):
         self.assertIsInstance(self.task_pool._enough_room, asyncio.locks.Semaphore)
@@ -66,27 +76,26 @@ class BaseTaskPoolTestCase(IsolatedAsyncioTestCase):
         self.assertEqual(0, self.task_pool._num_cancelled)
         self.assertEqual(0, self.task_pool._num_ended)
         self.assertEqual(self.mock_idx, self.task_pool._idx)
-        self.assertEqual(self.test_pool_name, self.task_pool._name)
-        self.assertIsInstance(self.task_pool._all_tasks_known_flag, asyncio.locks.Event)
-        self.assertTrue(self.task_pool._all_tasks_known_flag.is_set())
+        self.assertEqual(self.TEST_POOL_NAME, self.task_pool._name)
+        self.assertListEqual(self.task_pool._before_gathering, EMPTY_LIST)
         self.assertIsInstance(self.task_pool._interrupt_flag, asyncio.locks.Event)
         self.assertFalse(self.task_pool._interrupt_flag.is_set())
         self.mock__add_pool.assert_called_once_with(self.task_pool)
-        self.mock_pool_size.assert_called_once_with(self.test_pool_size)
+        self.mock_pool_size.assert_called_once_with(self.TEST_POOL_SIZE)
         self.mock___str__.assert_called_once_with()

     def test___str__(self):
-        self.__str___patcher.stop()
-        expected_str = f'{pool.BaseTaskPool.__name__}-{self.test_pool_name}'
+        self.dunder_str_patcher.stop()
+        expected_str = f'{pool.BaseTaskPool.__name__}-{self.TEST_POOL_NAME}'
         self.assertEqual(expected_str, str(self.task_pool))
-        setattr(self.task_pool, '_name', None)
+        self.task_pool._name = None
         expected_str = f'{pool.BaseTaskPool.__name__}-{self.task_pool._idx}'
         self.assertEqual(expected_str, str(self.task_pool))

     def test_pool_size(self):
         self.pool_size_patcher.stop()
-        self.task_pool._pool_size = self.test_pool_size
-        self.assertEqual(self.test_pool_size, self.task_pool.pool_size)
+        self.task_pool._pool_size = self.TEST_POOL_SIZE
+        self.assertEqual(self.TEST_POOL_SIZE, self.task_pool.pool_size)

         with self.assertRaises(ValueError):
             self.task_pool.pool_size = -1
@@ -342,11 +351,11 @@ class BaseTaskPoolTestCase(IsolatedAsyncioTestCase):
         self.assertFalse(self.task_pool._open)

     async def test_gather(self):
-        mock_wait = AsyncMock()
-        self.task_pool._all_tasks_known_flag = MagicMock(wait=mock_wait)
         test_exception = TestException()
         mock_ended_func, mock_cancelled_func = AsyncMock(return_value=FOO), AsyncMock(side_effect=test_exception)
         mock_running_func = AsyncMock(return_value=BAR)
+        mock_queue_join = AsyncMock()
+        self.task_pool._before_gathering = before_gather = [mock_queue_join()]
         self.task_pool._ended = ended = {123: mock_ended_func()}
         self.task_pool._cancelled = cancelled = {456: mock_cancelled_func()}
         self.task_pool._running = running = {789: mock_running_func()}
@@ -358,25 +367,307 @@ class BaseTaskPoolTestCase(IsolatedAsyncioTestCase):
         self.assertDictEqual(self.task_pool._ended, ended)
         self.assertDictEqual(self.task_pool._cancelled, cancelled)
         self.assertDictEqual(self.task_pool._running, running)
+        self.assertListEqual(self.task_pool._before_gathering, before_gather)
         self.assertTrue(self.task_pool._interrupt_flag.is_set())
-        mock_wait.assert_not_awaited()

         self.task_pool._open = False

-        def check_assertions() -> None:
+        def check_assertions(output) -> None:
             self.assertListEqual([FOO, test_exception, BAR], output)
             self.assertDictEqual(self.task_pool._ended, EMPTY_DICT)
             self.assertDictEqual(self.task_pool._cancelled, EMPTY_DICT)
             self.assertDictEqual(self.task_pool._running, EMPTY_DICT)
+            self.assertListEqual(self.task_pool._before_gathering, EMPTY_LIST)
             self.assertFalse(self.task_pool._interrupt_flag.is_set())
-            mock_wait.assert_awaited_once_with()

-        output = await self.task_pool.gather(return_exceptions=True)
-        check_assertions()
-        mock_wait.reset_mock()
+        check_assertions(await self.task_pool.gather(return_exceptions=True))

+        self.task_pool._before_gathering = [mock_queue_join()]
         self.task_pool._ended = {123: mock_ended_func()}
         self.task_pool._cancelled = {456: mock_cancelled_func()}
         self.task_pool._running = {789: mock_running_func()}
-        output = await self.task_pool.gather(return_exceptions=True)
-        check_assertions()
+        check_assertions(await self.task_pool.gather(return_exceptions=True))


+class TaskPoolTestCase(CommonTestCase):
+    TEST_CLASS = pool.TaskPool
+    task_pool: pool.TaskPool
+
+    @patch.object(pool.TaskPool, '_start_task')
+    async def test__apply_one(self, mock__start_task: AsyncMock):
+        mock__start_task.return_value = expected_output = 12345
+        mock_awaitable = MagicMock()
+        mock_func = MagicMock(return_value=mock_awaitable)
+        args, kwargs = (FOO, BAR), {'a': 1, 'b': 2}
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        output = await self.task_pool._apply_one(mock_func, args, kwargs, end_cb, cancel_cb)
+        self.assertEqual(expected_output, output)
+        mock_func.assert_called_once_with(*args, **kwargs)
+        mock__start_task.assert_awaited_once_with(mock_awaitable, end_callback=end_cb, cancel_callback=cancel_cb)
+
+        mock_func.reset_mock()
+        mock__start_task.reset_mock()
+
+        output = await self.task_pool._apply_one(mock_func, args, None, end_cb, cancel_cb)
+        self.assertEqual(expected_output, output)
+        mock_func.assert_called_once_with(*args)
+        mock__start_task.assert_awaited_once_with(mock_awaitable, end_callback=end_cb, cancel_callback=cancel_cb)
+
+    @patch.object(pool.TaskPool, '_apply_one')
+    async def test_apply(self, mock__apply_one: AsyncMock):
+        mock__apply_one.return_value = mock_id = 67890
+        mock_func, num = MagicMock(), 3
+        args, kwargs = (FOO, BAR), {'a': 1, 'b': 2}
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        expected_output = num * [mock_id]
+        output = await self.task_pool.apply(mock_func, args, kwargs, num, end_cb, cancel_cb)
+        self.assertEqual(expected_output, output)
+        mock__apply_one.assert_has_awaits(num * [call(mock_func, args, kwargs, end_cb, cancel_cb)])
+
+    async def test__queue_producer(self):
+        mock_put = AsyncMock()
+        mock_q = MagicMock(put=mock_put)
+        args = (FOO, BAR, 123)
+        assert not self.task_pool._interrupt_flag.is_set()
+        self.assertIsNone(await self.task_pool._queue_producer(mock_q, args))
+        mock_put.assert_has_awaits([call(arg) for arg in args])
+        mock_put.reset_mock()
+        self.task_pool._interrupt_flag.set()
+        self.assertIsNone(await self.task_pool._queue_producer(mock_q, args))
+        mock_put.assert_not_awaited()
+
+    @patch.object(pool, 'partial')
+    @patch.object(pool, 'star_function')
+    @patch.object(pool.TaskPool, '_start_task')
+    async def test__queue_consumer(self, mock__start_task: AsyncMock, mock_star_function: MagicMock,
+                                   mock_partial: MagicMock):
+        mock_partial.return_value = queue_callback = 'not really'
+        mock_star_function.return_value = awaitable = 'totally an awaitable'
+        q, arg = Queue(), 420.69
+        q.put_nowait(arg)
+        mock_func, stars = MagicMock(), 3
+        mock_flag, end_cb, cancel_cb = MagicMock(), MagicMock(), MagicMock()
+        self.assertIsNone(await self.task_pool._queue_consumer(q, mock_flag, mock_func, stars, end_cb, cancel_cb))
+        self.assertTrue(q.empty())
+        mock__start_task.assert_awaited_once_with(awaitable, ignore_closed=True,
+                                                  end_callback=queue_callback, cancel_callback=cancel_cb)
+        mock_star_function.assert_called_once_with(mock_func, arg, arg_stars=stars)
+        mock_partial.assert_called_once_with(pool.TaskPool._queue_callback, self.task_pool,
+                                             q=q, first_batch_started=mock_flag, func=mock_func, arg_stars=stars,
+                                             end_callback=end_cb, cancel_callback=cancel_cb)
+        mock__start_task.reset_mock()
+        mock_star_function.reset_mock()
+        mock_partial.reset_mock()
+
+        self.assertIsNone(await self.task_pool._queue_consumer(q, mock_flag, mock_func, stars, end_cb, cancel_cb))
+        self.assertTrue(q.empty())
+        mock__start_task.assert_not_awaited()
+        mock_star_function.assert_not_called()
+        mock_partial.assert_not_called()
+
+    @patch.object(pool, 'execute_optional')
+    @patch.object(pool.TaskPool, '_queue_consumer')
+    async def test__queue_callback(self, mock__queue_consumer: AsyncMock, mock_execute_optional: AsyncMock):
+        task_id, mock_q = 420, MagicMock()
+        mock_func, stars = MagicMock(), 3
+        mock_wait = AsyncMock()
+        mock_flag = MagicMock(wait=mock_wait)
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        self.assertIsNone(await self.task_pool._queue_callback(task_id, mock_q, mock_flag, mock_func, stars,
+                                                               end_callback=end_cb, cancel_callback=cancel_cb))
+        mock_wait.assert_awaited_once_with()
+        mock__queue_consumer.assert_awaited_once_with(mock_q, mock_flag, mock_func, stars,
+                                                      end_callback=end_cb, cancel_callback=cancel_cb)
+        mock_execute_optional.assert_awaited_once_with(end_cb, args=(task_id,))
+
+    @patch.object(pool, 'iter')
+    @patch.object(pool, 'create_task')
+    @patch.object(pool, 'join_queue', new_callable=MagicMock)
+    @patch.object(pool.TaskPool, '_queue_producer', new_callable=MagicMock)
+    async def test__set_up_args_queue(self, mock__queue_producer: MagicMock, mock_join_queue: MagicMock,
+                                      mock_create_task: MagicMock, mock_iter: MagicMock):
+        args, num_tasks = (FOO, BAR, 1, 2, 3), 2
+        mock_join_queue.return_value = mock_join = 'awaitable'
+        mock_iter.return_value = args_iter = iter(args)
+        mock__queue_producer.return_value = mock_producer_coro = 'very awaitable'
+        output_q = self.task_pool._set_up_args_queue(args, num_tasks)
+        self.assertIsInstance(output_q, Queue)
+        self.assertEqual(num_tasks, output_q.qsize())
+        for arg in args[:num_tasks]:
+            self.assertEqual(arg, output_q.get_nowait())
+        self.assertTrue(output_q.empty())
+        for arg in args[num_tasks:]:
+            self.assertEqual(arg, next(args_iter))
+        with self.assertRaises(StopIteration):
+            next(args_iter)
+        self.assertListEqual([mock_join], self.task_pool._before_gathering)
+        mock_join_queue.assert_called_once_with(output_q)
+        mock__queue_producer.assert_called_once_with(output_q, args_iter)
+        mock_create_task.assert_called_once_with(mock_producer_coro)
+
+        self.task_pool._before_gathering.clear()
+        mock_join_queue.reset_mock()
+        mock__queue_producer.reset_mock()
+        mock_create_task.reset_mock()
+
+        num_tasks = 6
+        mock_iter.return_value = args_iter = iter(args)
+        output_q = self.task_pool._set_up_args_queue(args, num_tasks)
+        self.assertIsInstance(output_q, Queue)
+        self.assertEqual(len(args), output_q.qsize())
+        for arg in args:
+            self.assertEqual(arg, output_q.get_nowait())
+        self.assertTrue(output_q.empty())
+        with self.assertRaises(StopIteration):
+            next(args_iter)
+        self.assertListEqual([mock_join], self.task_pool._before_gathering)
+        mock_join_queue.assert_called_once_with(output_q)
+        mock__queue_producer.assert_not_called()
+        mock_create_task.assert_not_called()
+
+    @patch.object(pool, 'Event')
+    @patch.object(pool.TaskPool, '_queue_consumer')
+    @patch.object(pool.TaskPool, '_set_up_args_queue')
+    @patch.object(pool.TaskPool, 'is_open', new_callable=PropertyMock)
+    async def test__map(self, mock_is_open: MagicMock, mock__set_up_args_queue: MagicMock,
+                        mock__queue_consumer: AsyncMock, mock_event_cls: MagicMock):
+        qsize = 4
+        mock__set_up_args_queue.return_value = mock_q = MagicMock(qsize=MagicMock(return_value=qsize))
+        mock_flag_set = MagicMock()
+        mock_event_cls.return_value = mock_flag = MagicMock(set=mock_flag_set)
+
+        mock_func, stars = MagicMock(), 3
+        args_iter, num_tasks = (FOO, BAR, 1, 2, 3), 2
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+
+        mock_is_open.return_value = False
+        with self.assertRaises(exceptions.PoolIsClosed):
+            await self.task_pool._map(mock_func, args_iter, stars, num_tasks, end_cb, cancel_cb)
+        mock_is_open.assert_called_once_with()
+        mock__set_up_args_queue.assert_not_called()
+        mock__queue_consumer.assert_not_awaited()
+        mock_flag_set.assert_not_called()
+
+        mock_is_open.reset_mock()
+
+        mock_is_open.return_value = True
+        self.assertIsNone(await self.task_pool._map(mock_func, args_iter, stars, num_tasks, end_cb, cancel_cb))
+        mock__set_up_args_queue.assert_called_once_with(args_iter, num_tasks)
+        mock__queue_consumer.assert_has_awaits(qsize * [call(mock_q, mock_flag, mock_func, arg_stars=stars,
+                                                             end_callback=end_cb, cancel_callback=cancel_cb)])
+        mock_flag_set.assert_called_once_with()
+
+    @patch.object(pool.TaskPool, '_map')
+    async def test_map(self, mock__map: AsyncMock):
+        mock_func = MagicMock()
+        arg_iter, num_tasks = (FOO, BAR, 1, 2, 3), 2
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        self.assertIsNone(await self.task_pool.map(mock_func, arg_iter, num_tasks, end_cb, cancel_cb))
+        mock__map.assert_awaited_once_with(mock_func, arg_iter, arg_stars=0, num_tasks=num_tasks,
+                                           end_callback=end_cb, cancel_callback=cancel_cb)
+
+    @patch.object(pool.TaskPool, '_map')
+    async def test_starmap(self, mock__map: AsyncMock):
+        mock_func = MagicMock()
+        args_iter, num_tasks = ([FOO], [BAR]), 2
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        self.assertIsNone(await self.task_pool.starmap(mock_func, args_iter, num_tasks, end_cb, cancel_cb))
+        mock__map.assert_awaited_once_with(mock_func, args_iter, arg_stars=1, num_tasks=num_tasks,
+                                           end_callback=end_cb, cancel_callback=cancel_cb)
+
+    @patch.object(pool.TaskPool, '_map')
+    async def test_doublestarmap(self, mock__map: AsyncMock):
+        mock_func = MagicMock()
+        kwargs_iter, num_tasks = [{'a': FOO}, {'a': BAR}], 2
+        end_cb, cancel_cb = MagicMock(), MagicMock()
+        self.assertIsNone(await self.task_pool.doublestarmap(mock_func, kwargs_iter, num_tasks, end_cb, cancel_cb))
+        mock__map.assert_awaited_once_with(mock_func, kwargs_iter, arg_stars=2, num_tasks=num_tasks,
+                                           end_callback=end_cb, cancel_callback=cancel_cb)
+
+
+class SimpleTaskPoolTestCase(CommonTestCase):
+    TEST_CLASS = pool.SimpleTaskPool
+    task_pool: pool.SimpleTaskPool
+
+    TEST_POOL_FUNC = AsyncMock(__name__=FOO)
+    TEST_POOL_ARGS = (FOO, BAR)
+    TEST_POOL_KWARGS = {'a': 1, 'b': 2}
+    TEST_POOL_END_CB = MagicMock()
+    TEST_POOL_CANCEL_CB = MagicMock()
+
+    def get_task_pool_init_params(self) -> dict:
+        return super().get_task_pool_init_params() | {
+            'func': self.TEST_POOL_FUNC,
+            'args': self.TEST_POOL_ARGS,
+            'kwargs': self.TEST_POOL_KWARGS,
+            'end_callback': self.TEST_POOL_END_CB,
+            'cancel_callback': self.TEST_POOL_CANCEL_CB,
+        }
+
+    def setUp(self) -> None:
+        self.base_class_init_patcher = patch.object(pool.BaseTaskPool, '__init__')
+        self.base_class_init = self.base_class_init_patcher.start()
+        super().setUp()
+
+    def tearDown(self) -> None:
+        self.base_class_init_patcher.stop()
+
+    def test_init(self):
+        self.assertEqual(self.TEST_POOL_FUNC, self.task_pool._func)
+        self.assertEqual(self.TEST_POOL_ARGS, self.task_pool._args)
+        self.assertEqual(self.TEST_POOL_KWARGS, self.task_pool._kwargs)
+        self.assertEqual(self.TEST_POOL_END_CB, self.task_pool._end_callback)
+        self.assertEqual(self.TEST_POOL_CANCEL_CB, self.task_pool._cancel_callback)
+        self.base_class_init.assert_called_once_with(pool_size=self.TEST_POOL_SIZE, name=self.TEST_POOL_NAME)
+
+        with self.assertRaises(exceptions.NotCoroutine):
+            pool.SimpleTaskPool(MagicMock())
+
+    def test_func_name(self):
+        self.assertEqual(self.TEST_POOL_FUNC.__name__, self.task_pool.func_name)
+
+    @patch.object(pool.SimpleTaskPool, '_start_task')
+    async def test__start_one(self, mock__start_task: AsyncMock):
+        mock__start_task.return_value = expected_output = 99
+        self.task_pool._func = MagicMock(return_value=BAR)
+        output = await self.task_pool._start_one()
+        self.assertEqual(expected_output, output)
+        self.task_pool._func.assert_called_once_with(*self.task_pool._args, **self.task_pool._kwargs)
+        mock__start_task.assert_awaited_once_with(BAR, end_callback=self.task_pool._end_callback,
+                                                  cancel_callback=self.task_pool._cancel_callback)
+
+    @patch.object(pool.SimpleTaskPool, '_start_one')
+    async def test_start(self, mock__start_one: AsyncMock):
+        mock__start_one.return_value = FOO
+        num = 5
+        output = await self.task_pool.start(num)
+        expected_output = num * [FOO]
+        self.assertListEqual(expected_output, output)
+        mock__start_one.assert_has_awaits(num * [call()])
+
+    @patch.object(pool.SimpleTaskPool, 'cancel')
+    def test_stop(self, mock_cancel: MagicMock):
+        num = 2
+        id1, id2, id3 = 5, 6, 7
+        self.task_pool._running = {id1: FOO, id2: BAR, id3: FOO + BAR}
+        output = self.task_pool.stop(num)
+        expected_output = [id3, id2]
+        self.assertEqual(expected_output, output)
+        mock_cancel.assert_called_once_with(*expected_output)
+        mock_cancel.reset_mock()
+
+        num = 50
+        output = self.task_pool.stop(num)
+        expected_output = [id3, id2, id1]
+        self.assertEqual(expected_output, output)
+        mock_cancel.assert_called_once_with(*expected_output)
+
+    @patch.object(pool.SimpleTaskPool, 'num_running', new_callable=PropertyMock)
+    @patch.object(pool.SimpleTaskPool, 'stop')
+    def test_stop_all(self, mock_stop: MagicMock, mock_num_running: MagicMock):
+        mock_num_running.return_value = num = 9876
+        mock_stop.return_value = expected_output = 'something'
+        output = self.task_pool.stop_all()
+        self.assertEqual(expected_output, output)
+        mock_num_running.assert_called_once_with()
+        mock_stop.assert_called_once_with(num)
usage/USAGE.md (156 lines changed)
@@ -2,7 +2,7 @@

 ## Minimal example for `SimpleTaskPool`

-The minimum required setup is a "worker" coroutine function that can do something asynchronously, a main coroutine function that sets up the `SimpleTaskPool` and starts/stops the tasks as desired, eventually awaiting them all.
+The minimum required setup is a "worker" coroutine function that can do something asynchronously, and a main coroutine function that sets up the `SimpleTaskPool`, starts/stops the tasks as desired, and eventually awaits them all.

 The following demo code enables full log output first for additional clarity. It is complete and should work as is.
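The demo code itself is unchanged by this commit and therefore not shown in the hunk. For orientation, a minimal sketch consistent with the `SimpleTaskPool` API in this diff (the worker body and the exact start/stop sequence are assumptions, not the repository's actual demo):

```python
import asyncio

from asyncio_taskpool.pool import SimpleTaskPool


async def work(n: int) -> None:
    # Made-up worker: keep doing something with `n` until cancelled.
    while True:
        await asyncio.sleep(1)
        print("did", n)


async def main() -> None:
    pool = SimpleTaskPool(work, args=(4,))  # defines the pool; nothing runs yet
    await pool.start(3)    # launches three tasks (IDs 0, 1, 2)
    await asyncio.sleep(3)
    pool.stop(2)           # cancels the two most recently started tasks (LIFO)
    pool.stop_all()        # cancels whatever is left
    pool.close()
    await pool.gather()    # awaits the (now cancelled) tasks


asyncio.run(main())
```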
@@ -77,6 +77,156 @@ did 4
 did 4
 ```

-## Advanced example
+## Advanced example for `TaskPool`

-...
+This time, we want to start tasks from _different_ coroutine functions **and** with _different_ arguments. For this we need an instance of the more generalized `TaskPool` class.
+
+As with the simple example, we need "worker" coroutine functions that can do something asynchronously, as well as a main coroutine function that sets up the pool, starts the tasks, and eventually awaits them.
+
+The following demo code enables full log output first for additional clarity. It is complete and should work as is.
+
+### Code
+```python
+import logging
+import asyncio
+
+from asyncio_taskpool.pool import TaskPool
+
+
+logging.getLogger().setLevel(logging.NOTSET)
+logging.getLogger('asyncio_taskpool').addHandler(logging.StreamHandler())
+
+
+async def work(start: int, stop: int, step: int = 1) -> None:
+    """Pseudo-worker function counting through a range with a second of sleep in between each iteration."""
+    for i in range(start, stop, step):
+        await asyncio.sleep(1)
+        print("work with", i)
+
+
+async def other_work(a: int, b: int) -> None:
+    """Different pseudo-worker counting through a range with half a second of sleep in between each iteration."""
+    for i in range(a, b):
+        await asyncio.sleep(0.5)
+        print("other_work with", i)
+
+
+async def main() -> None:
+    # Initialize a new task pool instance and limit its size to 3 tasks.
+    pool = TaskPool(3)
+    # Queue up two tasks (IDs 0 and 1) to run concurrently (with the same keyword-arguments).
+    print("Called `apply`")
+    await pool.apply(work, kwargs={'start': 100, 'stop': 200, 'step': 10}, num=2)
+    # Let the tasks work for a bit.
+    await asyncio.sleep(1.5)
+    # Now, let us enqueue four more tasks (which will receive IDs 2, 3, 4, and 5), each created with different
+    # positional arguments by using `starmap`, but have **no more than two of those** run concurrently.
+    # Since we set our pool size to 3, and already have two tasks working within the pool,
+    # only the first one of these will start immediately (and receive ID 2).
+    # The second one will start (with ID 3), only once there is room in the pool,
+    # which -- in this example -- will be the case after ID 2 ends;
+    # until then the `starmap` method call **will block**!
+    # Once there is room in the pool again, the third one will start immediately (and receive ID 4).
+    # The last one will start (with ID 5) **only** once there is room in the pool **and** no more than one of these
+    # last four tasks is running.
+    args_list = [(0, 10), (10, 20), (20, 30), (30, 40)]
+    print("Calling `starmap`...")
+    await pool.starmap(other_work, args_list, num_tasks=2)
+    print("`starmap` returned")
+    # Now we close the pool, so that we can safely await all our tasks.
+    pool.close()
+    # Finally, we block, until all tasks have ended.
+    print("Called `gather`")
+    await pool.gather()
+    print("Done.")
+
+
+if __name__ == '__main__':
+    asyncio.run(main())
+```
+
+### Output
+Additional comments for the output are provided with `<---` next to the output lines.
+
+(Keep in mind that the logger and `print` asynchronously write to `stdout`.)
+```
+TaskPool-0 initialized
+Started TaskPool-0_Task-0
+Started TaskPool-0_Task-1
+Called `apply`
+work with 100
+work with 100
+Calling `starmap`... <--- notice that this blocks as expected
+Started TaskPool-0_Task-2
+work with 110
+work with 110
+other_work with 0
+other_work with 1
+work with 120
+work with 120
+other_work with 2
+other_work with 3
+work with 130
+work with 130
+other_work with 4
+other_work with 5
+work with 140
+work with 140
+other_work with 6
+other_work with 7
+work with 150
+work with 150
+other_work with 8
+Ended TaskPool-0_Task-2 <--- here Task-2 makes room in the pool and unblocks `main()`
+TaskPool-0 is closed!
+Started TaskPool-0_Task-3
+other_work with 9
+`starmap` returned
+Called `gather`
+work with 160
+work with 160
+other_work with 10
+other_work with 11
+work with 170
+work with 170
+other_work with 12
+other_work with 13
+work with 180
+work with 180
+other_work with 14
+other_work with 15
+Ended TaskPool-0_Task-0
+Ended TaskPool-0_Task-1 <--- even though there is room in the pool now, Task-5 will not start
+Started TaskPool-0_Task-4
+work with 190
+work with 190
+other_work with 16
+other_work with 20
+other_work with 17
+other_work with 21
+other_work with 18
+other_work with 22
+other_work with 19
+Ended TaskPool-0_Task-3 <--- now that only Task-4 is left, Task-5 will start
+Started TaskPool-0_Task-5
+other_work with 23
+other_work with 30
+other_work with 24
+other_work with 31
+other_work with 25
+other_work with 32
+other_work with 26
+other_work with 33
+other_work with 27
+other_work with 34
+other_work with 28
+other_work with 35
+Ended TaskPool-0_Task-4
+other_work with 29
+other_work with 36
+other_work with 37
+other_work with 38
+other_work with 39
+Done.
+Ended TaskPool-0_Task-5
+```