# xthread.py
  1. # Copyright 2009 Brian Quinlan. All Rights Reserved.
  2. # Licensed to PSF under a Contributor Agreement.
  3. """Implements ThreadPoolExecutor."""
  4. __author__ = 'Brian Quinlan (brian@sweetapp.com)'
  5. import atexit
  6. from concurrent.futures import _base
  7. import itertools
  8. import queue
  9. import threading
  10. import weakref
  11. import os
  12. from fate_flow.settings import stat_logger
  13. # Workers are created as daemon threads. This is done to allow the interpreter
  14. # to exit when there are still idle threads in a ThreadPoolExecutor's thread
  15. # pool (i.e. shutdown() was not called). However, allowing workers to die with
  16. # the interpreter has two undesirable properties:
  17. # - The workers would still be running during interpreter shutdown,
  18. # meaning that they would fail in unpredictable ways.
  19. # - The workers could be killed while evaluating a work item, which could
  20. # be bad if the callable being evaluated has external side-effects e.g.
  21. # writing to a file.
  22. #
  23. # To work around this problem, an exit handler is installed which tells the
  24. # workers to exit when their work queues are empty and then waits until the
  25. # threads finish.
  26. _threads_queues = weakref.WeakKeyDictionary()
  27. _shutdown = False
  28. def _python_exit():
  29. global _shutdown
  30. _shutdown = True
  31. items = list(_threads_queues.items())
  32. for t, q in items:
  33. q.put(None)
  34. for t, q in items:
  35. t.join()
  36. atexit.register(_python_exit)
  37. class _WorkItem(object):
  38. def __init__(self, future, fn, args, kwargs):
  39. self.future = future
  40. self.fn = fn
  41. self.args = args
  42. self.kwargs = kwargs
  43. def run(self):
  44. if not self.future.set_running_or_notify_cancel():
  45. return
  46. try:
  47. result = self.fn(*self.args, **self.kwargs)
  48. except BaseException as exc:
  49. self.future.set_exception(exc)
  50. # Break a reference cycle with the exception 'exc'
  51. self = None
  52. else:
  53. self.future.set_result(result)
  54. def _worker(executor_reference, work_queue):
  55. try:
  56. while True:
  57. stat_logger.info(f"worker queue size is {work_queue.qsize()}")
  58. work_item = work_queue.get(block=True)
  59. if work_item is not None:
  60. work_item.run()
  61. # Delete references to object. See issue16284
  62. del work_item
  63. continue
  64. executor = executor_reference()
  65. # Exit if:
  66. # - The interpreter is shutting down OR
  67. # - The executor that owns the worker has been collected OR
  68. # - The executor that owns the worker has been shutdown.
  69. if _shutdown or executor is None or executor._shutdown:
  70. # Notice other workers
  71. work_queue.put(None)
  72. return
  73. del executor
  74. except BaseException:
  75. _base.LOGGER.critical('Exception in worker', exc_info=True)
  76. class ThreadPoolExecutor(_base.Executor):
  77. # Used to assign unique thread names when thread_name_prefix is not supplied.
  78. _counter = itertools.count().__next__
  79. def __init__(self, max_workers=None, thread_name_prefix=''):
  80. """Initializes a new ThreadPoolExecutor instance.
  81. Args:
  82. max_workers: The maximum number of threads that can be used to
  83. execute the given calls.
  84. thread_name_prefix: An optional name prefix to give our threads.
  85. """
  86. if max_workers is None:
  87. # Use this number because ThreadPoolExecutor is often
  88. # used to overlap I/O instead of CPU work.
  89. max_workers = (os.cpu_count() or 1) * 5
  90. if max_workers <= 0:
  91. raise ValueError("max_workers must be greater than 0")
  92. self._max_workers = max_workers
  93. self._work_queue = queue.Queue()
  94. self._threads = set()
  95. self._shutdown = False
  96. self._shutdown_lock = threading.Lock()
  97. self._thread_name_prefix = (thread_name_prefix or
  98. ("ThreadPoolExecutor-%d" % self._counter()))
  99. def submit(self, fn, *args, **kwargs):
  100. with self._shutdown_lock:
  101. if self._shutdown:
  102. raise RuntimeError('cannot schedule new futures after shutdown')
  103. f = _base.Future()
  104. w = _WorkItem(f, fn, args, kwargs)
  105. self._work_queue.put(w)
  106. self._adjust_thread_count()
  107. return f
  108. submit.__doc__ = _base.Executor.submit.__doc__
  109. def _adjust_thread_count(self):
  110. # When the executor gets lost, the weakref callback will wake up
  111. # the worker threads.
  112. def weakref_cb(_, q=self._work_queue):
  113. q.put(None)
  114. # TODO(bquinlan): Should avoid creating new threads if there are more
  115. # idle threads than items in the work queue.
  116. num_threads = len(self._threads)
  117. if num_threads < self._max_workers:
  118. thread_name = '%s_%d' % (self._thread_name_prefix or self,
  119. num_threads)
  120. t = threading.Thread(name=thread_name, target=_worker,
  121. args=(weakref.ref(self, weakref_cb),
  122. self._work_queue))
  123. t.daemon = True
  124. t.start()
  125. self._threads.add(t)
  126. _threads_queues[t] = self._work_queue
  127. else:
  128. stat_logger.info(f"the number of max workers {self._max_workers} has been exceeded, worker queue size is {self._work_queue.qsize()}")
  129. def shutdown(self, wait=True):
  130. with self._shutdown_lock:
  131. self._shutdown = True
  132. self._work_queue.put(None)
  133. if wait:
  134. for t in self._threads:
  135. t.join()
  136. shutdown.__doc__ = _base.Executor.shutdown.__doc__