1 /*
2  * Hunt - A refined core library for D programming language.
3  *
4  * Copyright (C) 2018-2019 HuntLabs
5  *
6  * Website: https://www.huntlabs.net/
7  *
8  * Licensed under the Apache-2.0 License.
9  *
10  */
11 
12 module hunt.concurrency.ThreadPoolExecutor;
13 
14 import hunt.concurrency.AbstractExecutorService;
15 import hunt.concurrency.AbstractOwnableSynchronizer;
16 import hunt.concurrency.AbstractQueuedSynchronizer;
17 import hunt.concurrency.atomic.AtomicHelper;
18 import hunt.concurrency.BlockingQueue;
19 import hunt.concurrency.Exceptions;
20 import hunt.concurrency.ExecutorService;
21 import hunt.concurrency.Future;
22 import hunt.concurrency.thread.ThreadEx;
23 import hunt.concurrency.ThreadFactory;
24 
25 import hunt.collection;
26 import hunt.Exceptions;
27 import hunt.Functions;
28 import hunt.Integer;
29 import hunt.util.DateTime;
30 import hunt.util.Common;
31 import hunt.util.Runnable;
32 
33 import core.sync.mutex;
34 import core.sync.condition;
35 import core.thread;
36 import std.algorithm;
37 import std.conv;
38 
39 import hunt.logging.ConsoleLogger;
40 
41 // import hunt.collection.ArrayList;
42 // import java.util.ConcurrentModificationException;
43 // import java.util.HashSet;
44 // import java.util.List;
45 // import hunt.concurrency.locks.AbstractQueuedSynchronizer;
46 // import hunt.concurrency.locks.Mutex;
47 
48 /**
49  * An {@link ExecutorService} that executes each submitted task using
50  * one of possibly several pooled threads, normally configured
51  * using {@link Executors} factory methods.
52  *
53  * <p>Thread pools address two different problems: they usually
54  * provide improved performance when executing large numbers of
55  * asynchronous tasks, due to reduced per-task invocation overhead,
56  * and they provide a means of bounding and managing the resources,
57  * including threads, consumed when executing a collection of tasks.
58  * Each {@code ThreadPoolExecutor} also maintains some basic
59  * statistics, such as the number of completed tasks.
60  *
61  * <p>To be useful across a wide range of contexts, this class
62  * provides many adjustable parameters and extensibility
63  * hooks. However, programmers are urged to use the more convenient
64  * {@link Executors} factory methods {@link
65  * Executors#newCachedThreadPool} (unbounded thread pool, with
66  * automatic thread reclamation), {@link Executors#newFixedThreadPool}
67  * (fixed size thread pool) and {@link
68  * Executors#newSingleThreadExecutor} (single background thread), that
69  * preconfigure settings for the most common usage
70  * scenarios. Otherwise, use the following guide when manually
71  * configuring and tuning this class:
72  *
73  * <dl>
74  *
75  * <dt>Core and maximum pool sizes</dt>
76  *
77  * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
78  * pool size (see {@link #getPoolSize})
79  * according to the bounds set by
80  * corePoolSize (see {@link #getCorePoolSize}) and
81  * maximumPoolSize (see {@link #getMaximumPoolSize}).
82  *
83  * When a new task is submitted in method {@link #execute(Runnable)},
84  * if fewer than corePoolSize threads are running, a new thread is
85  * created to handle the request, even if other worker threads are
86  * idle.  Else if fewer than maximumPoolSize threads are running, a
87  * new thread will be created to handle the request only if the queue
88  * is full.  By setting corePoolSize and maximumPoolSize the same, you
89  * create a fixed-size thread pool. By setting maximumPoolSize to an
90  * essentially unbounded value such as {@code Integer.MAX_VALUE}, you
91  * allow the pool to accommodate an arbitrary number of concurrent
92  * tasks. Most typically, core and maximum pool sizes are set only
93  * upon construction, but they may also be changed dynamically using
94  * {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd>
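 *
 * <p>A minimal configuration sketch (hedged: {@code LinkedBlockingQueue} is
 * just an assumed {@code BlockingQueue!(Runnable)} implementation; any
 * blocking queue from this library works):
 *
 * <pre> {@code
 * auto queue = new LinkedBlockingQueue!(Runnable)();
 * // core = 2, max = 8; idle threads above core die after 60 seconds
 * // (60.seconds is a core.time Duration)
 * auto pool = new ThreadPoolExecutor(2, 8, 60.seconds, queue);
 *
 * // Both bounds may also be changed later:
 * pool.setCorePoolSize(4);
 * pool.setMaximumPoolSize(16);
 * }</pre>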
95  *
96  * <dt>On-demand construction</dt>
97  *
98  * <dd>By default, even core threads are initially created and
99  * started only when new tasks arrive, but this can be overridden
100  * dynamically using method {@link #prestartCoreThread} or {@link
101  * #prestartAllCoreThreads}.  You probably want to prestart threads if
102  * you construct the pool with a non-empty queue. </dd>
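 *
 * <p>For example (sketch only), a pool built over a pre-filled queue would
 * typically be started eagerly:
 *
 * <pre> {@code
 * pool.prestartAllCoreThreads(); // or pool.prestartCoreThread();
 * }</pre>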
103  *
104  * <dt>Creating new threads</dt>
105  *
 * <dd>New threads are created using a {@link ThreadFactory}.  If not
 * otherwise specified, {@link Executors#defaultThreadFactory} is used;
 * it creates threads that all belong to the same {@link
 * ThreadGroupEx}, with the same {@code NORM_PRIORITY} priority and
 * non-daemon status. By supplying a different ThreadFactory, you can
111  * alter the thread's name, thread group, priority, daemon status,
112  * etc. If a {@code ThreadFactory} fails to create a thread when asked
113  * by returning null from {@code newThread}, the executor will
114  * continue, but might not be able to execute any tasks. Threads
115  * should possess the "modifyThread" {@code RuntimePermission}. If
116  * worker threads or other threads using the pool do not possess this
117  * permission, service may be degraded: configuration changes may not
118  * take effect in a timely manner, and a shutdown pool may remain in a
119  * state in which termination is possible but not completed.</dd>
120  *
121  * <dt>Keep-alive times</dt>
122  *
123  * <dd>If the pool currently has more than corePoolSize threads,
124  * excess threads will be terminated if they have been idle for more
125  * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}).
126  * This provides a means of reducing resource consumption when the
127  * pool is not being actively used. If the pool becomes more active
128  * later, new threads will be constructed. This parameter can also be
129  * changed dynamically using method {@link #setKeepAliveTime(long,
130  * TimeUnit)}.  Using a value of {@code Long.MAX_VALUE} {@link
131  * TimeUnit#NANOSECONDS} effectively disables idle threads from ever
132  * terminating prior to shut down. By default, the keep-alive policy
133  * applies only when there are more than corePoolSize threads, but
134  * method {@link #allowCoreThreadTimeOut(bool)} can be used to
135  * apply this time-out policy to core threads as well, so long as the
136  * keepAliveTime value is non-zero. </dd>
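 *
 * <p>Sketch: letting core threads time out as well (assumes a non-zero
 * keep-alive was passed to the constructor):
 *
 * <pre> {@code
 * pool.allowCoreThreadTimeOut(true);
 * }</pre>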
137  *
138  * <dt>Queuing</dt>
139  *
140  * <dd>Any {@link BlockingQueue} may be used to transfer and hold
141  * submitted tasks.  The use of this queue interacts with pool sizing:
142  *
143  * <ul>
144  *
145  * <li>If fewer than corePoolSize threads are running, the Executor
146  * always prefers adding a new thread
147  * rather than queuing.
148  *
149  * <li>If corePoolSize or more threads are running, the Executor
150  * always prefers queuing a request rather than adding a new
151  * thread.
152  *
153  * <li>If a request cannot be queued, a new thread is created unless
154  * this would exceed maximumPoolSize, in which case, the task will be
155  * rejected.
156  *
157  * </ul>
158  *
159  * There are three general strategies for queuing:
160  * <ol>
161  *
162  * <li><em> Direct handoffs.</em> A good default choice for a work
163  * queue is a {@link SynchronousQueue} that hands off tasks to threads
164  * without otherwise holding them. Here, an attempt to queue a task
165  * will fail if no threads are immediately available to run it, so a
166  * new thread will be constructed. This policy avoids lockups when
167  * handling sets of requests that might have internal dependencies.
168  * Direct handoffs generally require unbounded maximumPoolSizes to
 * avoid rejection of newly submitted tasks. This in turn admits the
170  * possibility of unbounded thread growth when commands continue to
171  * arrive on average faster than they can be processed.
172  *
173  * <li><em> Unbounded queues.</em> Using an unbounded queue (for
174  * example a {@link LinkedBlockingQueue} without a predefined
175  * capacity) will cause new tasks to wait in the queue when all
176  * corePoolSize threads are busy. Thus, no more than corePoolSize
177  * threads will ever be created. (And the value of the maximumPoolSize
178  * therefore doesn't have any effect.)  This may be appropriate when
 * each task is completely independent of others, so tasks cannot
 * affect each other's execution; for example, in a web page server.
181  * While this style of queuing can be useful in smoothing out
182  * bursts of requests, it admits the possibility of
183  * unbounded work queue growth when commands continue to arrive on
184  * average faster than they can be processed.
185  *
186  * <li><em>Bounded queues.</em> A bounded queue (for example, an
187  * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when
188  * used with finite maximumPoolSizes, but can be more difficult to
189  * tune and control.  Queue sizes and maximum pool sizes may be traded
190  * off for each other: Using large queues and small pools minimizes
191  * CPU usage, OS resources, and context-switching overhead, but can
192  * lead to artificially low throughput.  If tasks frequently block (for
193  * example if they are I/O bound), a system may be able to schedule
194  * time for more threads than you otherwise allow. Use of small queues
195  * generally requires larger pool sizes, which keeps CPUs busier but
196  * may encounter unacceptable scheduling overhead, which also
197  * decreases throughput.
198  *
199  * </ol>
200  *
201  * </dd>
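 *
 * <p>Queue choice in code (hedged sketch; these queue classes are the ones
 * named above and are assumed to be available from this library):
 *
 * <pre> {@code
 * // 1. Direct handoff: pair with a large maximumPoolSize.
 * auto handoff   = new SynchronousQueue!(Runnable)();
 * // 2. Unbounded queue: maximumPoolSize is effectively ignored.
 * auto unbounded = new LinkedBlockingQueue!(Runnable)();
 * // 3. Bounded queue: trade queue size against pool size.
 * auto bounded   = new ArrayBlockingQueue!(Runnable)(1024);
 * }</pre>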
202  *
203  * <dt>Rejected tasks</dt>
204  *
205  * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
206  * <em>rejected</em> when the Executor has been shut down, and also when
207  * the Executor uses finite bounds for both maximum threads and work queue
208  * capacity, and is saturated.  In either case, the {@code execute} method
209  * invokes the {@link
210  * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
211  * method of its {@link RejectedExecutionHandler}.  Four predefined handler
212  * policies are provided:
213  *
214  * <ol>
215  *
216  * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler
217  * throws a runtime {@link RejectedExecutionException} upon rejection.
218  *
219  * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread
220  * that invokes {@code execute} itself runs the task. This provides a
221  * simple feedback control mechanism that will slow down the rate that
222  * new tasks are submitted.
223  *
224  * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that
225  * cannot be executed is simply dropped.
226  *
227  * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
228  * executor is not shut down, the task at the head of the work queue
229  * is dropped, and then execution is retried (which can fail again,
230  * causing this to be repeated.)
231  *
232  * </ol>
233  *
234  * It is possible to define and use other kinds of {@link
235  * RejectedExecutionHandler} classes. Doing so requires some care
236  * especially when policies are designed to work only under particular
237  * capacity or queuing policies. </dd>
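 *
 * <p>Hedged sketch of installing a non-default policy ({@code queue} is a
 * placeholder {@code BlockingQueue!(Runnable)}; {@code CallerRunsPolicy} is
 * the handler named above):
 *
 * <pre> {@code
 * auto pool = new ThreadPoolExecutor(2, 4, 30.seconds, queue,
 *         ThreadFactory.defaultThreadFactory(),
 *         new CallerRunsPolicy());   // rejected tasks run on the caller
 * }</pre>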
238  *
239  * <dt>Hook methods</dt>
240  *
241  * <dd>This class provides {@code protected} overridable
242  * {@link #beforeExecute(Thread, Runnable)} and
243  * {@link #afterExecute(Runnable, Throwable)} methods that are called
244  * before and after execution of each task.  These can be used to
245  * manipulate the execution environment; for example, reinitializing
246  * ThreadLocals, gathering statistics, or adding log entries.
247  * Additionally, method {@link #terminated} can be overridden to perform
248  * any special processing that needs to be done once the Executor has
249  * fully terminated.
250  *
251  * <p>If hook, callback, or BlockingQueue methods throw exceptions,
252  * internal worker threads may in turn fail, abruptly terminate, and
253  * possibly be replaced.</dd>
254  *
255  * <dt>Queue maintenance</dt>
256  *
257  * <dd>Method {@link #getQueue()} allows access to the work queue
258  * for purposes of monitoring and debugging.  Use of this method for
259  * any other purpose is strongly discouraged.  Two supplied methods,
260  * {@link #remove(Runnable)} and {@link #purge} are available to
261  * assist in storage reclamation when large numbers of queued tasks
262  * become cancelled.</dd>
263  *
264  * <dt>Reclamation</dt>
265  *
266  * <dd>A pool that is no longer referenced in a program <em>AND</em>
267  * has no remaining threads may be reclaimed (garbage collected)
268  * without being explicitly shutdown. You can configure a pool to
269  * allow all unused threads to eventually die by setting appropriate
270  * keep-alive times, using a lower bound of zero core threads and/or
271  * setting {@link #allowCoreThreadTimeOut(bool)}.  </dd>
272  *
273  * </dl>
274  *
275  * <p><b>Extension example</b>. Most extensions of this class
276  * override one or more of the protected hook methods. For example,
277  * here is a subclass that adds a simple pause/resume feature:
278  *
279  * <pre> {@code
 * class PausableThreadPoolExecutor : ThreadPoolExecutor {
 *   private bool isPaused;
 *   private Mutex pauseLock;
 *   private Condition unpaused;
 *
 *   this(...) {
 *     super(...);
 *     pauseLock = new Mutex();
 *     unpaused = new Condition(pauseLock);
 *   }
 *
 *   protected override void beforeExecute(Thread t, Runnable r) {
 *     super.beforeExecute(t, r);
 *     pauseLock.lock();
 *     scope(exit) pauseLock.unlock();
 *     while (isPaused)
 *       unpaused.wait();
 *   }
 *
 *   void pause() {
 *     pauseLock.lock();
 *     scope(exit) pauseLock.unlock();
 *     isPaused = true;
 *   }
 *
 *   void resume() {
 *     pauseLock.lock();
 *     scope(exit) pauseLock.unlock();
 *     isPaused = false;
 *     unpaused.notifyAll();
 *   }
 * }}</pre>
318  *
319  * @author Doug Lea
320  */
321 class ThreadPoolExecutor : AbstractExecutorService {
322     /**
323      * The main pool control state, ctl, is an atomic integer packing
324      * two conceptual fields
325      *   workerCount, indicating the effective number of threads
326      *   runState,    indicating whether running, shutting down etc
327      *
328      * In order to pack them into one int, we limit workerCount to
329      * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
330      * billion) otherwise representable. If this is ever an issue in
331      * the future, the variable can be changed to be an AtomicLong,
332      * and the shift/mask constants below adjusted. But until the need
333      * arises, this code is a bit faster and simpler using an int.
334      *
335      * The workerCount is the number of workers that have been
336      * permitted to start and not permitted to stop.  The value may be
337      * transiently different from the actual number of live threads,
338      * for example when a ThreadFactory fails to create a thread when
339      * asked, and when exiting threads are still performing
340      * bookkeeping before terminating. The user-visible pool size is
341      * reported as the current size of the workers set.
342      *
343      * The runState provides the main lifecycle control, taking on values:
344      *
345      *   RUNNING:  Accept new tasks and process queued tasks
346      *   SHUTDOWN: Don't accept new tasks, but process queued tasks
347      *   STOP:     Don't accept new tasks, don't process queued tasks,
348      *             and interrupt in-progress tasks
349      *   TIDYING:  All tasks have terminated, workerCount is zero,
350      *             the thread transitioning to state TIDYING
351      *             will run the terminated() hook method
352      *   TERMINATED: terminated() has completed
353      *
354      * The numerical order among these values matters, to allow
355      * ordered comparisons. The runState monotonically increases over
356      * time, but need not hit each state. The transitions are:
357      *
358      * RUNNING -> SHUTDOWN
359      *    On invocation of shutdown()
360      * (RUNNING or SHUTDOWN) -> STOP
361      *    On invocation of shutdownNow()
362      * SHUTDOWN -> TIDYING
363      *    When both queue and pool are empty
364      * STOP -> TIDYING
365      *    When pool is empty
366      * TIDYING -> TERMINATED
367      *    When the terminated() hook method has completed
368      *
369      * Threads waiting in awaitTermination() will return when the
370      * state reaches TERMINATED.
371      *
372      * Detecting the transition from SHUTDOWN to TIDYING is less
373      * straightforward than you'd like because the queue may become
374      * empty after non-empty and vice versa during SHUTDOWN state, but
375      * we can only terminate if, after seeing that it is empty, we see
376      * that workerCount is 0 (which sometimes entails a recheck -- see
377      * below).
378      */
379     private shared(int) ctl; // = new AtomicInteger(ctlOf(RUNNING, 0));
380     private enum int COUNT_BITS = Integer.SIZE - 3;
381     private enum int COUNT_MASK = (1 << COUNT_BITS) - 1;
382 
383     // runState is stored in the high-order bits
384     private enum int RUNNING    = -1 << COUNT_BITS;
385     private enum int SHUTDOWN   =  0 << COUNT_BITS;
386     private enum int STOP       =  1 << COUNT_BITS;
387     private enum int TIDYING    =  2 << COUNT_BITS;
388     private enum int TERMINATED =  3 << COUNT_BITS;
389 
390     /**
391      * The queue used for holding tasks and handing off to worker
392      * threads.  We do not require that workQueue.poll() returning
393      * null necessarily means that workQueue.isEmpty(), so rely
394      * solely on isEmpty to see if the queue is empty (which we must
395      * do for example when deciding whether to transition from
396      * SHUTDOWN to TIDYING).  This accommodates special-purpose
397      * queues such as DelayQueues for which poll() is allowed to
398      * return null even if it may later return non-null when delays
399      * expire.
400      */
401     private BlockingQueue!(Runnable) workQueue;
402 
403     /**
404      * Lock held on access to workers set and related bookkeeping.
405      * While we could use a concurrent set of some sort, it turns out
406      * to be generally preferable to use a lock. Among the reasons is
407      * that this serializes interruptIdleWorkers, which avoids
408      * unnecessary interrupt storms, especially during shutdown.
409      * Otherwise exiting threads would concurrently interrupt those
410      * that have not yet interrupted. It also simplifies some of the
411      * associated statistics bookkeeping of largestPoolSize etc. We
412      * also hold mainLock on shutdown and shutdownNow, for the sake of
413      * ensuring workers set is stable while separately checking
414      * permission to interrupt and actually interrupting.
415      */
416     private Mutex mainLock;
417 
418     /**
419      * Set containing all worker threads in pool. Accessed only when
420      * holding mainLock.
421      */
422     private HashSet!(Worker) workers;
423 
424     /**
425      * Wait condition to support awaitTermination.
426      */
427     private Condition termination;
428 
429     /**
430      * Tracks largest attained pool size. Accessed only under
431      * mainLock.
432      */
433     private int largestPoolSize;
434 
435     /**
436      * Counter for completed tasks. Updated only on termination of
437      * worker threads. Accessed only under mainLock.
438      */
439     private long completedTaskCount;
440 
441     /*
442      * All user control parameters are declared as volatiles so that
443      * ongoing actions are based on freshest values, but without need
444      * for locking, since no internal invariants depend on them
445      * changing synchronously with respect to other actions.
446      */
447 
448     /**
449      * Factory for new threads. All threads are created using this
450      * factory (via method addWorker).  All callers must be prepared
451      * for addWorker to fail, which may reflect a system or user's
452      * policy limiting the number of threads.  Even though it is not
453      * treated as an error, failure to create threads may result in
454      * new tasks being rejected or existing ones remaining stuck in
455      * the queue.
456      *
457      * We go further and preserve pool invariants even in the face of
458      * errors such as OutOfMemoryError, that might be thrown while
459      * trying to create threads.  Such errors are rather common due to
460      * the need to allocate a native stack in Thread.start, and users
461      * will want to perform clean pool shutdown to clean up.  There
462      * will likely be enough memory available for the cleanup code to
463      * complete without encountering yet another OutOfMemoryError.
464      */
465     private ThreadFactory threadFactory;
466 
467     /**
468      * Handler called when saturated or shutdown in execute.
469      */
470     private RejectedExecutionHandler handler;
471 
472     /**
473      * Timeout in nanoseconds for idle threads waiting for work.
474      * Threads use this timeout when there are more than corePoolSize
475      * present or if allowCoreThreadTimeOut. Otherwise they wait
476      * forever for new work.
477      */
478     private long keepAliveTime;
479 
480     /**
481      * If false (default), core threads stay alive even when idle.
482      * If true, core threads use keepAliveTime to time out waiting
483      * for work.
484      */
485     private bool _allowCoreThreadTimeOut;
486 
487     /**
488      * Core pool size is the minimum number of workers to keep alive
489      * (and not allow to time out etc) unless allowCoreThreadTimeOut
490      * is set, in which case the minimum is zero.
491      *
492      * Since the worker count is actually stored in COUNT_BITS bits,
493      * the effective limit is {@code corePoolSize & COUNT_MASK}.
494      */
495     private int corePoolSize;
496 
497     /**
498      * Maximum pool size.
499      *
500      * Since the worker count is actually stored in COUNT_BITS bits,
501      * the effective limit is {@code maximumPoolSize & COUNT_MASK}.
502      */
503     private int maximumPoolSize;
504 
505     /**
506      * Permission required for callers of shutdown and shutdownNow.
507      * We additionally require (see checkShutdownAccess) that callers
508      * have permission to actually interrupt threads in the worker set
509      * (as governed by Thread.interrupt, which relies on
510      * ThreadGroupEx.checkAccess, which in turn relies on
511      * SecurityManager.checkAccess). Shutdowns are attempted only if
512      * these checks pass.
513      *
514      * All actual invocations of Thread.interrupt (see
515      * interruptIdleWorkers and interruptWorkers) ignore
516      * SecurityExceptions, meaning that the attempted interrupts
517      * silently fail. In the case of shutdown, they should not fail
518      * unless the SecurityManager has inconsistent policies, sometimes
519      * allowing access to a thread and sometimes not. In such cases,
520      * failure to actually interrupt threads may disable or delay full
521      * termination. Other uses of interruptIdleWorkers are advisory,
522      * and failure to actually interrupt will merely delay response to
523      * configuration changes so is not handled exceptionally.
524      */
525     // private __gshared RuntimePermission shutdownPerm =
526     //     new RuntimePermission("modifyThread");
527 
528 
529     /**
530      * The default rejected execution handler.
531      */
532     private __gshared RejectedExecutionHandler defaultHandler;
533 
534 
535     shared static this() {
536         defaultHandler = new AbortPolicy();
537     }
538 
539     private void initialize() {
540         mainLock = new Mutex();
541         termination = new Condition(mainLock);
542         ctl = ctlOf(RUNNING, 0);
543         workers = new HashSet!(Worker)();
544     }
545 
546     // Packing and unpacking ctl
547     private static int runStateOf(int c)     { return c & ~COUNT_MASK; }
548     private static int workerCountOf(int c)  { return c & COUNT_MASK; }
549     private static int ctlOf(int rs, int wc) { return rs | wc; }
550 
551     /*
552      * Bit field accessors that don't require unpacking ctl.
553      * These depend on the bit layout and on workerCount being never negative.
554      */
555 
556     private static bool runStateLessThan(int c, int s) {
557         return c < s;
558     }
559 
560     private static bool runStateAtLeast(int c, int s) {
561         return c >= s;
562     }
563 
564     private static bool isRunning(int c) {
565         return c < SHUTDOWN;
566     }
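
    /*
     * A small, hedged sanity sketch of the ctl packing/unpacking helpers
     * defined above (pure constant arithmetic; no pool is created).
     */
    unittest {
        int c = ctlOf(RUNNING, 5);
        assert(workerCountOf(c) == 5);
        assert(runStateOf(c) == RUNNING);
        assert(isRunning(c));
        assert(runStateLessThan(c, SHUTDOWN));
        // The run states are numerically ordered, as the ctl comment relies on.
        assert(RUNNING < SHUTDOWN && SHUTDOWN < STOP
                && STOP < TIDYING && TIDYING < TERMINATED);
    }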
567 
568     /**
569      * Attempts to CAS-increment the workerCount field of ctl.
570      */
571     private bool compareAndIncrementWorkerCount(int expect) {
572         return AtomicHelper.compareAndSet(ctl, expect, expect + 1);
573     }
574 
575     /**
576      * Attempts to CAS-decrement the workerCount field of ctl.
577      */
578     private bool compareAndDecrementWorkerCount(int expect) {
579         return AtomicHelper.compareAndSet(ctl, expect, expect - 1);
580     }
581 
582     /**
583      * Decrements the workerCount field of ctl. This is called only on
584      * abrupt termination of a thread (see processWorkerExit). Other
585      * decrements are performed within getTask.
586      */
587     private void decrementWorkerCount() {
588         AtomicHelper.decrement(ctl);
589     }
590     
591     /**
592      * Class Worker mainly maintains interrupt control state for
593      * threads running tasks, along with other minor bookkeeping.
594      * This class opportunistically extends AbstractQueuedSynchronizer
595      * to simplify acquiring and releasing a lock surrounding each
596      * task execution.  This protects against interrupts that are
597      * intended to wake up a worker thread waiting for a task from
598      * instead interrupting a task being run.  We implement a simple
599      * non-reentrant mutual exclusion lock rather than use
600      * Mutex because we do not want worker tasks to be able to
601      * reacquire the lock when they invoke pool control methods like
602      * setCorePoolSize.  Additionally, to suppress interrupts until
603      * the thread actually starts running tasks, we initialize lock
604      * state to a negative value, and clear it upon start (in
605      * runWorker).
606      */
607     private final class Worker : AbstractQueuedSynchronizer, Runnable
608     {
609         /** Thread this worker is running in.  Null if factory fails. */
610         Thread thread;
611         /** Initial task to run.  Possibly null. */
612         Runnable firstTask;
613         /** Per-thread task counter */
614         long completedTasks;
615 
616         // TODO: switch to AbstractQueuedLongSynchronizer and move
617         // completedTasks into the lock word.
618 
619         /**
620          * Creates with given first task and thread from ThreadFactory.
621          * @param firstTask the first task (null if none)
622          */
623         this(Runnable firstTask) {
624             setState(-1); // inhibit interrupts until runWorker
625             this.firstTask = firstTask;
626             this.thread = getThreadFactory().newThread(new class Runnable {
627                 void run() {
628                     runWorker(this.outer);
629                 }
630             });
631         }
632 
633         /** Delegates main run loop to outer runWorker. */
634         void run() {
635             runWorker(this);
636         }
637 
638         // Lock methods
639         //
640         // The value 0 represents the unlocked state.
641         // The value 1 represents the locked state.
642 
643         protected override bool isHeldExclusively() {
644             return getState() != 0;
645         }
646 
647         protected override bool tryAcquire(int unused) {
648             if (compareAndSetState(0, 1)) { 
649                 setExclusiveOwnerThread(Thread.getThis());
650                 return true;
651             }
652             return false;
653         }
654 
655         protected override bool tryRelease(int unused) {
656             setExclusiveOwnerThread(null);
657             setState(0);
658             return true;
659         }
660 
661         void lock()        { acquire(1); }
662         bool tryLock()  { return tryAcquire(1); }
663         void unlock()      { release(1); }
664         bool isLocked() { return isHeldExclusively(); }
665 
666         void interruptIfStarted() {
667             ThreadEx t;
668             if (getState() >= 0 && (t = cast(ThreadEx)thread) !is null && !t.isInterrupted()) {
669                 try {
670                     t.interrupt();
671                 } catch (Exception ignore) {
672                     version(HUNT_DEBUG) warning(ignore.msg);
673                 }
674             }
675         }
676     }
677 
678     /*
679      * Methods for setting control state
680      */
681 
682     /**
683      * Transitions runState to given target, or leaves it alone if
684      * already at least the given target.
685      *
686      * @param targetState the desired state, either SHUTDOWN or STOP
687      *        (but not TIDYING or TERMINATED -- use tryTerminate for that)
688      */
689     private void advanceRunState(int targetState) {
690         // assert targetState == SHUTDOWN || targetState == STOP;
691         for (;;) {
692             int c = ctl;
693             if (runStateAtLeast(c, targetState) ||
694                 AtomicHelper.compareAndSet(ctl, c, ctlOf(targetState, workerCountOf(c))))
695                 break;
696         }
697     }
698 
699     /**
700      * Transitions to TERMINATED state if either (SHUTDOWN and pool
701      * and queue empty) or (STOP and pool empty).  If otherwise
702      * eligible to terminate but workerCount is nonzero, interrupts an
703      * idle worker to ensure that shutdown signals propagate. This
704      * method must be called following any action that might make
705      * termination possible -- reducing worker count or removing tasks
706      * from the queue during shutdown. The method is non-private to
707      * allow access from ScheduledThreadPoolExecutor.
708      */
709     final void tryTerminate() {
710         for (;;) {
711             int c = ctl;
712             if (isRunning(c) ||
713                 runStateAtLeast(c, TIDYING) ||
714                 (runStateLessThan(c, STOP) && ! workQueue.isEmpty()))
715                 return;
716             if (workerCountOf(c) != 0) { // Eligible to terminate
717                 interruptIdleWorkers(ONLY_ONE);
718                 return;
719             }
720 
721             Mutex mainLock = this.mainLock;
722             mainLock.lock();
723             try {
724                 if (AtomicHelper.compareAndSet(ctl, c, ctlOf(TIDYING, 0))) {
725                     try {
726                         terminated();
727                     } finally {
728                         ctl = ctlOf(TERMINATED, 0);
729                         termination.notifyAll();
730                     }
731                     return;
732                 }
733             } finally {
734                 mainLock.unlock();
735             }
736             // else retry on failed CAS
737         }
738     }
739 
740     /*
741      * Methods for controlling interrupts to worker threads.
742      */
743 
744     /**
745      * If there is a security manager, makes sure caller has
746      * permission to shut down threads in general (see shutdownPerm).
747      * If this passes, additionally makes sure the caller is allowed
748      * to interrupt each worker thread. This might not be true even if
749      * first check passed, if the SecurityManager treats some threads
750      * specially.
751      */
752     private void checkShutdownAccess() {
753         // FIXME: Needing refactor or cleanup -@zxp at 1/2/2019, 2:12:25 AM
754         // remove this
755         // debug implementationMissing(false);
756         // assert mainLock.isHeldByCurrentThread();
757         // SecurityManager security = System.getSecurityManager();
758         // if (security !is null) {
759         //     security.checkPermission(shutdownPerm);
760         //     for (Worker w : workers)
761         //         security.checkAccess(w.thread);
762         // }
763     }
764 
765     /**
766      * Interrupts all threads, even if active. Ignores SecurityExceptions
767      * (in which case some threads may remain uninterrupted).
768      */
769     private void interruptWorkers() {
770         // assert mainLock.isHeldByCurrentThread();
771         foreach (Worker w ; workers)
772             w.interruptIfStarted();
773     }
774 
775     /**
776      * Interrupts threads that might be waiting for tasks (as
777      * indicated by not being locked) so they can check for
778      * termination or configuration changes. Ignores
779      * SecurityExceptions (in which case some threads may remain
780      * uninterrupted).
781      *
782      * @param onlyOne If true, interrupt at most one worker. This is
783      * called only from tryTerminate when termination is otherwise
784      * enabled but there are still other workers.  In this case, at
785      * most one waiting worker is interrupted to propagate shutdown
786      * signals in case all threads are currently waiting.
787      * Interrupting any arbitrary thread ensures that newly arriving
788      * workers since shutdown began will also eventually exit.
789      * To guarantee eventual termination, it suffices to always
790      * interrupt only one idle worker, but shutdown() interrupts all
791      * idle workers so that redundant workers exit promptly, not
792      * waiting for a straggler task to finish.
793      */
794     private void interruptIdleWorkers(bool onlyOne) {
795         Mutex mainLock = this.mainLock;
796         mainLock.lock();
797         try {
798             foreach(Worker w ; workers) {
799                 ThreadEx t = cast(ThreadEx)w.thread;
800                 if (t !is null && !t.isInterrupted() && w.tryLock()) {
801                     try {
802                         t.interrupt();
803                     } catch (Exception ignore) {
804                         version(HUNT_DEBUG) {
805                             warning(ignore.toString());
806                         }
807                     } finally {
808                         w.unlock();
809                     }
810                 }
811                 if (onlyOne)
812                     break;
813             }
814         } finally {
815             mainLock.unlock();
816         }
817     }
818 
819     /**
820      * Common form of interruptIdleWorkers, to avoid having to
821      * remember what the bool argument means.
822      */
823     private void interruptIdleWorkers() {
824         interruptIdleWorkers(false);
825     }
826 
827     private enum bool ONLY_ONE = true;
828 
829     /*
830      * Misc utilities, most of which are also exported to
831      * ScheduledThreadPoolExecutor
832      */
833 
834     /**
835      * Invokes the rejected execution handler for the given command.
836      * Package-protected for use by ScheduledThreadPoolExecutor.
837      */
838     final void reject(Runnable command) {
839         handler.rejectedExecution(command, this);
840     }
841 
842     /**
843      * Performs any further cleanup following run state transition on
844      * invocation of shutdown.  A no-op here, but used by
845      * ScheduledThreadPoolExecutor to cancel delayed tasks.
846      */
847     void onShutdown() {
848     }
849 
850     /**
851      * Drains the task queue into a new list, normally using
852      * drainTo. But if the queue is a DelayQueue or any other kind of
853      * queue for which poll or drainTo may fail to remove some
854      * elements, it deletes them one by one.
855      */
856     private List!(Runnable) drainQueue() {
857         BlockingQueue!(Runnable) q = workQueue;
858         ArrayList!(Runnable) taskList = new ArrayList!(Runnable)();
859         q.drainTo(taskList);
860         if (!q.isEmpty()) {
861             foreach (Runnable r ; q.toArray()) {
862                 if (q.remove(r))
863                     taskList.add(r);
864             }
865         }
866         return taskList;
867     }
868 
869     /*
870      * Methods for creating, running and cleaning up after workers
871      */
872 
873     /**
874      * Checks if a new worker can be added with respect to current
875      * pool state and the given bound (either core or maximum). If so,
876      * the worker count is adjusted accordingly, and, if possible, a
877      * new worker is created and started, running firstTask as its
878      * first task. This method returns false if the pool is stopped or
879      * eligible to shut down. It also returns false if the thread
880      * factory fails to create a thread when asked.  If the thread
881      * creation fails, either due to the thread factory returning
882      * null, or due to an exception (typically OutOfMemoryError in
883      * Thread.start()), we roll back cleanly.
884      *
885      * @param firstTask the task the new thread should run first (or
886      * null if none). Workers are created with an initial first task
887      * (in method execute()) to bypass queuing when there are fewer
888      * than corePoolSize threads (in which case we always start one),
889      * or when the queue is full (in which case we must bypass queue).
890      * Initially idle threads are usually created via
891      * prestartCoreThread or to replace other dying workers.
892      *
893      * @param core if true use corePoolSize as bound, else
894      * maximumPoolSize. (A bool indicator is used here rather than a
895      * value to ensure reads of fresh values after checking other pool
896      * state).
897      * @return true if successful
898      */
899     private bool addWorker(Runnable firstTask, bool core) {
900         retry:
901         for (int c = ctl;;) {
902             // Check if queue empty only if necessary.
903             if (runStateAtLeast(c, SHUTDOWN)
904                 && (runStateAtLeast(c, STOP)
905                     || firstTask !is null
906                     || workQueue.isEmpty()))
907                 return false;
908 
909             for (;;) {
910                 if (workerCountOf(c)
911                     >= ((core ? corePoolSize : maximumPoolSize) & COUNT_MASK))
912                     return false;
913                 if (compareAndIncrementWorkerCount(c))
914                     break retry;
915                 c = ctl;  // Re-read ctl
916                 if (runStateAtLeast(c, SHUTDOWN))
917                     continue retry;
918                 // else CAS failed due to workerCount change; retry inner loop
919             }
920         }
921 
922         bool workerStarted = false;
923         bool workerAdded = false;
924         Worker w = null;
925         try {
926             w = new Worker(firstTask);
927             Thread t = w.thread;
928             if (t !is null) {
929                 Mutex mainLock = this.mainLock;
930                 mainLock.lock();
931                 try {
932                     // Recheck while holding lock.
933                     // Back out on ThreadFactory failure or if
934                     // shut down before lock acquired.
935                     int c = ctl;
936 
937                     if (isRunning(c) ||
938                         (runStateLessThan(c, STOP) && firstTask is null)) {
939                         // implementationMissing(false);
940                         // TODO: Tasks pending completion -@zxp at 10/18/2018, 9:14:13 AM
941                         // 
942                         // if (t.isAlive()) // precheck that t is startable
943                         //     throw new IllegalThreadStateException();
944                         workers.add(w);
945                         int s = workers.size();
946                         if (s > largestPoolSize)
947                             largestPoolSize = s;
948                         workerAdded = true;
949                     }
950                 } finally {
951                     mainLock.unlock();
952                 }
953                 if (workerAdded) {
954                     t.start();
955                     workerStarted = true;
956                 }
957             }
958         } finally {
959             if (! workerStarted)
960                 addWorkerFailed(w);
961         }
962         return workerStarted;
963     }
964 
965     /**
966      * Rolls back the worker thread creation.
967      * - removes worker from workers, if present
968      * - decrements worker count
969      * - rechecks for termination, in case the existence of this
970      *   worker was holding up termination
971      */
972     private void addWorkerFailed(Worker w) {
973         Mutex mainLock = this.mainLock;
974         mainLock.lock();
975         try {
976             if (w !is null)
977                 workers.remove(w);
978             decrementWorkerCount();
979             tryTerminate();
980         } finally {
981             mainLock.unlock();
982         }
983     }
984 
985     /**
986      * Performs cleanup and bookkeeping for a dying worker. Called
987      * only from worker threads. Unless completedAbruptly is set,
988      * assumes that workerCount has already been adjusted to account
989      * for exit.  This method removes thread from worker set, and
990      * possibly terminates the pool or replaces the worker if either
991      * it exited due to user task exception or if fewer than
992      * corePoolSize workers are running or queue is non-empty but
993      * there are no workers.
994      *
995      * @param w the worker
996      * @param completedAbruptly if the worker died due to user exception
997      */
998     private void processWorkerExit(Worker w, bool completedAbruptly) {
999         if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted
1000             decrementWorkerCount();
1001 
1002         Mutex mainLock = this.mainLock;
1003         mainLock.lock();
1004         try {
1005             completedTaskCount += w.completedTasks;
1006             workers.remove(w);
1007         } finally {
1008             mainLock.unlock();
1009         }
1010 
1011         tryTerminate();
1012 
1013         int c = ctl;
1014         if (runStateLessThan(c, STOP)) {
1015             if (!completedAbruptly) {
1016                 int min = _allowCoreThreadTimeOut ? 0 : corePoolSize;
1017                 if (min == 0 && ! workQueue.isEmpty())
1018                     min = 1;
1019                 if (workerCountOf(c) >= min)
1020                     return; // replacement not needed
1021             }
1022             addWorker(null, false);
1023         }
1024     }
1025 
1026     /**
1027      * Performs blocking or timed wait for a task, depending on
1028      * current configuration settings, or returns null if this worker
1029      * must exit because of any of:
1030      * 1. There are more than maximumPoolSize workers (due to
1031      *    a call to setMaximumPoolSize).
1032      * 2. The pool is stopped.
1033      * 3. The pool is shutdown and the queue is empty.
1034      * 4. This worker timed out waiting for a task, and timed-out
1035      *    workers are subject to termination (that is,
1036      *    {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
1037      *    both before and after the timed wait, and if the queue is
1038      *    non-empty, this worker is not the last thread in the pool.
1039      *
1040      * @return task, or null if the worker must exit, in which case
1041      *         workerCount is decremented
1042      */
1043     private Runnable getTask() {
1044         bool timedOut = false; // Did the last poll() time out?
1045 
1046         for (;;) {
1047             int c = ctl;
1048 
1049             // Check if queue empty only if necessary.
1050             if (runStateAtLeast(c, SHUTDOWN)
1051                 && (runStateAtLeast(c, STOP) || workQueue.isEmpty())) {
1052                 decrementWorkerCount();
1053                 return null;
1054             }
1055 
1056             int wc = workerCountOf(c);
1057 
1058             // Are workers subject to culling?
1059             bool timed = _allowCoreThreadTimeOut || wc > corePoolSize;
1060 
1061             if ((wc > maximumPoolSize || (timed && timedOut))
1062                 && (wc > 1 || workQueue.isEmpty())) {
1063                 if (compareAndDecrementWorkerCount(c))
1064                     return null;
1065                 continue;
1066             }
1067 
1068             try {
1069                 Runnable r = timed ?
1070                     workQueue.poll(dur!(TimeUnit.HectoNanosecond)(keepAliveTime)) :
1071                     workQueue.take();
1072                 if (r !is null)
1073                     return r;
1074                 timedOut = true;
1075             } catch (InterruptedException retry) {
1076                 timedOut = false;
1077             }
1078         }
1079     }
1080 
1081     /**
1082      * Main worker run loop.  Repeatedly gets tasks from queue and
1083      * executes them, while coping with a number of issues:
1084      *
1085      * 1. We may start out with an initial task, in which case we
1086      * don't need to get the first one. Otherwise, as long as pool is
1087      * running, we get tasks from getTask. If it returns null then the
1088      * worker exits due to changed pool state or configuration
1089      * parameters.  Other exits result from exception throws in
1090      * external code, in which case completedAbruptly holds, which
1091      * usually leads processWorkerExit to replace this thread.
1092      *
1093      * 2. Before running any task, the lock is acquired to prevent
1094      * other pool interrupts while the task is executing, and then we
1095      * ensure that unless pool is stopping, this thread does not have
1096      * its interrupt set.
1097      *
1098      * 3. Each task run is preceded by a call to beforeExecute, which
1099      * might throw an exception, in which case we cause thread to die
1100      * (breaking loop with completedAbruptly true) without processing
1101      * the task.
1102      *
1103      * 4. Assuming beforeExecute completes normally, we run the task,
1104      * gathering any of its thrown exceptions to send to afterExecute.
1105      * We separately handle RuntimeException, Error (both of which the
1106      * specs guarantee that we trap) and arbitrary Throwables.
1107      * Because we cannot rethrow Throwables within Runnable.run, we
1108      * wrap them within Errors on the way out (to the thread's
1109      * UncaughtExceptionHandler).  Any thrown exception also
1110      * conservatively causes thread to die.
1111      *
1112      * 5. After task.run completes, we call afterExecute, which may
1113      * also throw an exception, which will also cause thread to
1114      * die. According to JLS Sec 14.20, this exception is the one that
1115      * will be in effect even if task.run throws.
1116      *
1117      * The net effect of the exception mechanics is that afterExecute
1118      * and the thread's UncaughtExceptionHandler have as accurate
1119      * information as we can provide about any problems encountered by
1120      * user code.
1121      *
1122      * @param w the worker
1123      */
1124     final void runWorker(Worker w) {
1125         Thread wt = Thread.getThis();
1126         Runnable task = w.firstTask;
1127         w.firstTask = null;
1128         w.unlock(); // allow interrupts
1129         bool completedAbruptly = true;
1130         try {
1131             while (task !is null || (task = getTask()) !is null) {
1132                 w.lock();
1133                 // If pool is stopping, ensure thread is interrupted;
1134                 // if not, ensure thread is not interrupted.  This
1135                 // requires a recheck in second case to deal with
1136                 // shutdownNow race while clearing interrupt
1137 
1138                 // implementationMissing(false);
1139                 // if ((runStateAtLeast(ctl, STOP) ||
1140                 //      (Thread.interrupted() &&
1141                 //       runStateAtLeast(ctl, STOP))) &&
1142                 //     !wt.isInterrupted())
1143                 //     wt.interrupt();
1144                 try {
1145                     beforeExecute(wt, task);
1146                     try {
1147                         task.run();
1148                         afterExecute(task, null);
1149                     } catch (Throwable ex) {
1150                         afterExecute(task, ex);
1151                         throw ex;
1152                     }
1153                 } finally {
1154                     task = null;
1155                     w.completedTasks++;
1156                     w.unlock();
1157                 }
1158             }
1159             completedAbruptly = false;
1160         } finally {
1161             processWorkerExit(w, completedAbruptly);
1162         }
1163     }
1164 
1165     // constructors and methods
1166 
1167     /**
1168      * Creates a new {@code ThreadPoolExecutor} with the given initial
1169      * parameters, the default thread factory and the default rejected
1170      * execution handler.
1171      *
1172      * <p>It may be more convenient to use one of the {@link Executors}
1173      * factory methods instead of this general purpose constructor.
1174      *
1175      * @param corePoolSize the number of threads to keep in the pool, even
1176      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1177      * @param maximumPoolSize the maximum number of threads to allow in the
1178      *        pool
1179      * @param keepAliveTime when the number of threads is greater than
1180      *        the core, this is the maximum time that excess idle threads
1181      *        will wait for new tasks before terminating.
1182      * @param workQueue the queue to use for holding tasks before they are
1183      *        executed.  This queue will hold only the {@code Runnable}
1184      *        tasks submitted by the {@code execute} method.
1185      * @throws IllegalArgumentException if one of the following holds:<br>
1186      *         {@code corePoolSize < 0}<br>
1187      *         {@code keepAliveTime < 0}<br>
1188      *         {@code maximumPoolSize <= 0}<br>
1189      *         {@code maximumPoolSize < corePoolSize}
1190      * @throws NullPointerException if {@code workQueue} is null
1191      */
1192     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime,
1193         BlockingQueue!(Runnable) workQueue) {
1194         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1195              ThreadFactory.defaultThreadFactory(), defaultHandler);
1196     }
1197 
1198     /**
1199      * Creates a new {@code ThreadPoolExecutor} with the given initial
1200      * parameters and {@linkplain ThreadPoolExecutor.AbortPolicy
1201      * default rejected execution handler}.
1202      *
1203      * @param corePoolSize the number of threads to keep in the pool, even
1204      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1205      * @param maximumPoolSize the maximum number of threads to allow in the
1206      *        pool
1207      * @param keepAliveTime when the number of threads is greater than
1208      *        the core, this is the maximum time that excess idle threads
1209      *        will wait for new tasks before terminating.
1210      * @param workQueue the queue to use for holding tasks before they are
1211      *        executed.  This queue will hold only the {@code Runnable}
1212      *        tasks submitted by the {@code execute} method.
1213      * @param threadFactory the factory to use when the executor
1214      *        creates a new thread
1215      * @throws IllegalArgumentException if one of the following holds:<br>
1216      *         {@code corePoolSize < 0}<br>
1217      *         {@code keepAliveTime < 0}<br>
1218      *         {@code maximumPoolSize <= 0}<br>
1219      *         {@code maximumPoolSize < corePoolSize}
1220      * @throws NullPointerException if {@code workQueue}
1221      *         or {@code threadFactory} is null
1222      */
1223     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 
1224          BlockingQueue!(Runnable) workQueue, ThreadFactory threadFactory) {
1225         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1226              threadFactory, defaultHandler);
1227     }
1228 
1229     /**
1230      * Creates a new {@code ThreadPoolExecutor} with the given initial
1231      * parameters and
1232      * {@linkplain ThreadFactory#defaultThreadFactory default thread factory}.
1233      *
1234      * @param corePoolSize the number of threads to keep in the pool, even
1235      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1236      * @param maximumPoolSize the maximum number of threads to allow in the
1237      *        pool
1238      * @param keepAliveTime when the number of threads is greater than
1239      *        the core, this is the maximum time that excess idle threads
1240      *        will wait for new tasks before terminating.
1241      * @param workQueue the queue to use for holding tasks before they are
1242      *        executed.  This queue will hold only the {@code Runnable}
1243      *        tasks submitted by the {@code execute} method.
1244      * @param handler the handler to use when execution is blocked
1245      *        because the thread bounds and queue capacities are reached
1246      * @throws IllegalArgumentException if one of the following holds:<br>
1247      *         {@code corePoolSize < 0}<br>
1248      *         {@code keepAliveTime < 0}<br>
1249      *         {@code maximumPoolSize <= 0}<br>
1250      *         {@code maximumPoolSize < corePoolSize}
1251      * @throws NullPointerException if {@code workQueue}
1252      *         or {@code handler} is null
1253      */
1254     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime, 
1255         BlockingQueue!(Runnable) workQueue, RejectedExecutionHandler handler) {
1256         this(corePoolSize, maximumPoolSize, keepAliveTime, workQueue,
1257              ThreadFactory.defaultThreadFactory(), handler);
1258     }
1259 
1260     /**
1261      * Creates a new {@code ThreadPoolExecutor} with the given initial
1262      * parameters.
1263      *
1264      * @param corePoolSize the number of threads to keep in the pool, even
1265      *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
1266      * @param maximumPoolSize the maximum number of threads to allow in the
1267      *        pool
1268      * @param keepAliveTime when the number of threads is greater than
1269      *        the core, this is the maximum time that excess idle threads
1270      *        will wait for new tasks before terminating.
1271      * @param workQueue the queue to use for holding tasks before they are
1272      *        executed.  This queue will hold only the {@code Runnable}
1273      *        tasks submitted by the {@code execute} method.
1274      * @param threadFactory the factory to use when the executor
1275      *        creates a new thread
1276      * @param handler the handler to use when execution is blocked
1277      *        because the thread bounds and queue capacities are reached
1278      * @throws IllegalArgumentException if one of the following holds:<br>
1279      *         {@code corePoolSize < 0}<br>
1280      *         {@code keepAliveTime < 0}<br>
1281      *         {@code maximumPoolSize <= 0}<br>
1282      *         {@code maximumPoolSize < corePoolSize}
1283      * @throws NullPointerException if {@code workQueue}
1284      *         or {@code threadFactory} or {@code handler} is null
1285      */
1286     this(int corePoolSize, int maximumPoolSize, Duration keepAliveTime,
1287             BlockingQueue!(Runnable) workQueue,
1288             ThreadFactory threadFactory, RejectedExecutionHandler handler) {
1289 
1290         initialize();
1291         this.keepAliveTime = keepAliveTime.total!(TimeUnit.HectoNanosecond)();
1292         if (corePoolSize < 0 || maximumPoolSize <= 0 || 
1293             maximumPoolSize < corePoolSize || this.keepAliveTime < 0)
1294             throw new IllegalArgumentException();
1295 
1296         if (workQueue is null || threadFactory is null || handler is null)
1297             throw new NullPointerException();
1298 
1299         this.corePoolSize = corePoolSize;
1300         this.maximumPoolSize = maximumPoolSize;
1301         this.workQueue = workQueue;
1302         this.threadFactory = threadFactory;
1303         this.handler = handler;
1304     }
1305 
1306     /**
1307      * Executes the given task sometime in the future.  The task
1308      * may execute in a new thread or in an existing pooled thread.
1309      *
1310      * If the task cannot be submitted for execution, either because this
1311      * executor has been shutdown or because its capacity has been reached,
1312      * the task is handled by the current {@link RejectedExecutionHandler}.
1313      *
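     * <p>A usage sketch; {@code PrintTask} is an illustrative {@code Runnable}
     * and {@code pool} an already constructed {@code ThreadPoolExecutor}:
     *
     * <pre> {@code
     * class PrintTask : Runnable {
     *     void run() {
     *         import std.stdio : writeln;
     *         writeln("running inside the pool");
     *     }
     * }
     *
     * pool.execute(new PrintTask());
     * }</pre>
     *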
1314      * @param command the task to execute
1315      * @throws RejectedExecutionException at discretion of
1316      *         {@code RejectedExecutionHandler}, if the task
1317      *         cannot be accepted for execution
1318      * @throws NullPointerException if {@code command} is null
1319      */
1320     void execute(Runnable command) {
1321         if (command is null)
1322             throw new NullPointerException();
1323         /*
1324          * Proceed in 3 steps:
1325          *
1326          * 1. If fewer than corePoolSize threads are running, try to
1327          * start a new thread with the given command as its first
1328          * task.  The call to addWorker atomically checks runState and
1329          * workerCount, and so prevents false alarms that would add
1330          * threads when it shouldn't, by returning false.
1331          *
1332          * 2. If a task can be successfully queued, then we still need
1333          * to double-check whether we should have added a thread
1334          * (because existing ones died since last checking) or that
1335          * the pool shut down since entry into this method. So we
1336          * recheck state and if necessary roll back the enqueuing if
1337          * stopped, or start a new thread if there are none.
1338          *
         * 3. If we cannot queue the task, then we try to add a new
1340          * thread.  If it fails, we know we are shut down or saturated
1341          * and so reject the task.
1342          */
1343         int c = ctl;
1344         if (workerCountOf(c) < corePoolSize) {
1345             if (addWorker(command, true))
1346                 return;
1347             c = ctl;
1348         }
1349         if (isRunning(c) && workQueue.offer(command)) {
1350             int recheck = ctl;
1351             if (! isRunning(recheck) && remove(command))
1352                 reject(command);
1353             else if (workerCountOf(recheck) == 0)
1354                 addWorker(null, false);
1355         }
1356         else if (!addWorker(command, false))
1357             reject(command);
1358     }
1359 
1360     /**
1361      * Initiates an orderly shutdown in which previously submitted
1362      * tasks are executed, but no new tasks will be accepted.
1363      * Invocation has no additional effect if already shut down.
1364      *
1365      * <p>This method does not wait for previously submitted tasks to
1366      * complete execution.  Use {@link #awaitTermination awaitTermination}
1367      * to do that.
1368      *
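     * <p>A typical orderly-shutdown sequence (a sketch; {@code pool} is an
     * existing {@code ThreadPoolExecutor}):
     *
     * <pre> {@code
     * pool.shutdown();                          // stop accepting new tasks
     * if (!pool.awaitTermination(30.seconds)) {
     *     // still not terminated; drop the tasks that never started
     *     List!(Runnable) pending = pool.shutdownNow();
     * }
     * }</pre>
     *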
1369      * @throws SecurityException {@inheritDoc}
1370      */
1371     void shutdown() {
1372         Mutex mainLock = this.mainLock;
1373         mainLock.lock();
1374         try {
1375             checkShutdownAccess();
1376             advanceRunState(SHUTDOWN);
1377             interruptIdleWorkers();
1378             onShutdown(); // hook for ScheduledThreadPoolExecutor
1379         } finally {
1380             mainLock.unlock();
1381         }
1382         tryTerminate();
1383     }
1384 
1385     /**
1386      * Attempts to stop all actively executing tasks, halts the
1387      * processing of waiting tasks, and returns a list of the tasks
1388      * that were awaiting execution. These tasks are drained (removed)
1389      * from the task queue upon return from this method.
1390      *
1391      * <p>This method does not wait for actively executing tasks to
1392      * terminate.  Use {@link #awaitTermination awaitTermination} to
1393      * do that.
1394      *
1395      * <p>There are no guarantees beyond best-effort attempts to stop
1396      * processing actively executing tasks.  This implementation
1397      * interrupts tasks via {@link Thread#interrupt}; any task that
1398      * fails to respond to interrupts may never terminate.
1399      *
1400      * @throws SecurityException {@inheritDoc}
1401      */
1402     List!(Runnable) shutdownNow() {
1403         List!(Runnable) tasks;
1404         Mutex mainLock = this.mainLock;
1405         mainLock.lock();
1406         try {
1407             checkShutdownAccess();
1408             advanceRunState(STOP);
1409             interruptWorkers();
1410             tasks = drainQueue();
1411         } finally {
1412             mainLock.unlock();
1413         }
1414         tryTerminate();
1415         return tasks;
1416     }
1417 
1418     bool isShutdown() {
1419         return runStateAtLeast(ctl, SHUTDOWN);
1420     }
1421 
1422     /** Used by ScheduledThreadPoolExecutor. */
1423     bool isStopped() {
1424         return runStateAtLeast(ctl, STOP);
1425     }
1426 
1427     /**
1428      * Returns true if this executor is in the process of terminating
1429      * after {@link #shutdown} or {@link #shutdownNow} but has not
1430      * completely terminated.  This method may be useful for
1431      * debugging. A return of {@code true} reported a sufficient
1432      * period after shutdown may indicate that submitted tasks have
1433      * ignored or suppressed interruption, causing this executor not
1434      * to properly terminate.
1435      *
1436      * @return {@code true} if terminating but not yet terminated
1437      */
1438     bool isTerminating() {
1439         int c = ctl;
1440         return runStateAtLeast(c, SHUTDOWN) && runStateLessThan(c, TERMINATED);
1441     }
1442 
1443     bool isTerminated() {
1444         return runStateAtLeast(ctl, TERMINATED);
1445     }
1446 
    bool awaitTermination(Duration timeout) {
        Mutex mainLock = this.mainLock;
        mainLock.lock();
        try {
            while (runStateLessThan(ctl, TERMINATED)) {
                // Condition.wait(Duration) returns false when the timeout
                // elapses without a notification, meaning the pool has not
                // reached TERMINATED in time.
                // Note: unlike the JDK's awaitNanos loop, the remaining time
                // is not decremented across wakeups, so repeated notifications
                // may extend the total wait beyond the requested timeout.
                if (!termination.wait(timeout))
                    return false;
            }
            return true;
        } finally {
            mainLock.unlock();
        }
    }
1466 
1467     // Override without "throws Throwable" for compatibility with subclasses
1468     // whose finalize method invokes super.finalize() (as is recommended).
1469     // Before JDK 11, finalize() had a non-empty method body.
1470 
1471     /**
1472      * @implNote Previous versions of this class had a finalize method
1473      * that shut down this executor, but in this version, finalize
1474      * does nothing.
1475      */
1476     //@Deprecated(since="9")
1477     protected void finalize() {}
1478 
1479     /**
1480      * Sets the thread factory used to create new threads.
1481      *
1482      * @param threadFactory the new thread factory
1483      * @throws NullPointerException if threadFactory is null
1484      * @see #getThreadFactory
1485      */
1486     void setThreadFactory(ThreadFactory threadFactory) {
1487         if (threadFactory is null)
1488             throw new NullPointerException();
1489         this.threadFactory = threadFactory;
1490     }
1491 
1492     /**
1493      * Returns the thread factory used to create new threads.
1494      *
1495      * @return the current thread factory
1496      * @see #setThreadFactory(ThreadFactory)
1497      */
1498     ThreadFactory getThreadFactory() {
1499         return threadFactory;
1500     }
1501 
1502     /**
1503      * Sets a new handler for unexecutable tasks.
1504      *
1505      * @param handler the new handler
1506      * @throws NullPointerException if handler is null
1507      * @see #getRejectedExecutionHandler
1508      */
1509     void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
1510         if (handler is null)
1511             throw new NullPointerException();
1512         this.handler = handler;
1513     }
1514 
1515     /**
1516      * Returns the current handler for unexecutable tasks.
1517      *
1518      * @return the current handler
1519      * @see #setRejectedExecutionHandler(RejectedExecutionHandler)
1520      */
1521     RejectedExecutionHandler getRejectedExecutionHandler() {
1522         return handler;
1523     }
1524 
1525     /**
1526      * Sets the core number of threads.  This overrides any value set
1527      * in the constructor.  If the new value is smaller than the
1528      * current value, excess existing threads will be terminated when
1529      * they next become idle.  If larger, new threads will, if needed,
1530      * be started to execute any queued tasks.
1531      *
1532      * @param corePoolSize the new core size
1533      * @throws IllegalArgumentException if {@code corePoolSize < 0}
1534      *         or {@code corePoolSize} is greater than the {@linkplain
1535      *         #getMaximumPoolSize() maximum pool size}
1536      * @see #getCorePoolSize
1537      */
1538     void setCorePoolSize(int corePoolSize) {
1539         if (corePoolSize < 0 || maximumPoolSize < corePoolSize)
1540             throw new IllegalArgumentException();
1541         int delta = corePoolSize - this.corePoolSize;
1542         this.corePoolSize = corePoolSize;
1543         if (workerCountOf(ctl) > corePoolSize)
1544             interruptIdleWorkers();
1545         else if (delta > 0) {
1546             // We don't really know how many new threads are "needed".
1547             // As a heuristic, prestart enough new workers (up to new
1548             // core size) to handle the current number of tasks in
1549             // queue, but stop if queue becomes empty while doing so.
1550             int k = min(delta, workQueue.size());
1551             while (k-- > 0 && addWorker(null, true)) {
1552                 if (workQueue.isEmpty())
1553                     break;
1554             }
1555         }
1556     }
1557 
1558     /**
1559      * Returns the core number of threads.
1560      *
1561      * @return the core number of threads
1562      * @see #setCorePoolSize
1563      */
1564     int getCorePoolSize() {
1565         return corePoolSize;
1566     }
1567 
1568     /**
1569      * Starts a core thread, causing it to idly wait for work. This
1570      * overrides the default policy of starting core threads only when
1571      * new tasks are executed. This method will return {@code false}
1572      * if all core threads have already been started.
1573      *
1574      * @return {@code true} if a thread was started
1575      */
1576     bool prestartCoreThread() {
1577         return workerCountOf(ctl) < corePoolSize &&
1578             addWorker(null, true);
1579     }
1580 
1581     /**
1582      * Same as prestartCoreThread except arranges that at least one
1583      * thread is started even if corePoolSize is 0.
1584      */
1585     void ensurePrestart() {
1586         int wc = workerCountOf(ctl);
1587         if (wc < corePoolSize)
1588             addWorker(null, true);
1589         else if (wc == 0)
1590             addWorker(null, false);
1591     }
1592 
1593     /**
1594      * Starts all core threads, causing them to idly wait for work. This
1595      * overrides the default policy of starting core threads only when
1596      * new tasks are executed.
1597      *
1598      * @return the number of threads started
1599      */
1600     int prestartAllCoreThreads() {
1601         int n = 0;
1602         while (addWorker(null, true))
1603             ++n;
1604         return n;
1605     }
1606 
1607     /**
1608      * Returns true if this pool allows core threads to time out and
1609      * terminate if no tasks arrive within the keepAlive time, being
1610      * replaced if needed when new tasks arrive. When true, the same
1611      * keep-alive policy applying to non-core threads applies also to
1612      * core threads. When false (the default), core threads are never
1613      * terminated due to lack of incoming tasks.
1614      *
1615      * @return {@code true} if core threads are allowed to time out,
1616      *         else {@code false}
1617      *
1618      */
1619     bool allowsCoreThreadTimeOut() {
1620         return _allowCoreThreadTimeOut;
1621     }
1622 
1623     /**
1624      * Sets the policy governing whether core threads may time out and
1625      * terminate if no tasks arrive within the keep-alive time, being
1626      * replaced if needed when new tasks arrive. When false, core
1627      * threads are never terminated due to lack of incoming
1628      * tasks. When true, the same keep-alive policy applying to
1629      * non-core threads applies also to core threads. To avoid
1630      * continual thread replacement, the keep-alive time must be
1631      * greater than zero when setting {@code true}. This method
1632      * should in general be called before the pool is actively used.
1633      *
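     * <p>For example (a sketch), enabling the timeout for core threads after
     * making sure the keep-alive time is positive:
     *
     * <pre> {@code
     * pool.setKeepAliveTime(30.seconds);
     * pool.allowCoreThreadTimeOut(true);
     * }</pre>
     *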
1634      * @param value {@code true} if should time out, else {@code false}
1635      * @throws IllegalArgumentException if value is {@code true}
1636      *         and the current keep-alive time is not greater than zero
1637      *
1638      */
1639     void allowCoreThreadTimeOut(bool value) {
1640         if (value && keepAliveTime <= 0)
1641             throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1642         if (value != _allowCoreThreadTimeOut) {
1643             _allowCoreThreadTimeOut = value;
1644             if (value)
1645                 interruptIdleWorkers();
1646         }
1647     }
1648 
1649     /**
1650      * Sets the maximum allowed number of threads. This overrides any
1651      * value set in the constructor. If the new value is smaller than
1652      * the current value, excess existing threads will be
1653      * terminated when they next become idle.
1654      *
1655      * @param maximumPoolSize the new maximum
1656      * @throws IllegalArgumentException if the new maximum is
1657      *         less than or equal to zero, or
1658      *         less than the {@linkplain #getCorePoolSize core pool size}
1659      * @see #getMaximumPoolSize
1660      */
1661     void setMaximumPoolSize(int maximumPoolSize) {
1662         if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
1663             throw new IllegalArgumentException();
1664         this.maximumPoolSize = maximumPoolSize;
1665         if (workerCountOf(ctl) > maximumPoolSize)
1666             interruptIdleWorkers();
1667     }
1668 
1669     /**
1670      * Returns the maximum allowed number of threads.
1671      *
1672      * @return the maximum allowed number of threads
1673      * @see #setMaximumPoolSize
1674      */
1675     int getMaximumPoolSize() {
1676         return maximumPoolSize;
1677     }
1678 
1679     /**
1680      * Sets the thread keep-alive time, which is the amount of time
1681      * that threads may remain idle before being terminated.
1682      * Threads that wait this amount of time without processing a
1683      * task will be terminated if there are more than the core
1684      * number of threads currently in the pool, or if this pool
1685      * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1686      * This overrides any value set in the constructor.
1687      *
1688      * @param time the time to wait.  A time value of zero will cause
1689      *        excess threads to terminate immediately after executing tasks.
     * @throws IllegalArgumentException if {@code time} is less than zero or
     *         if {@code time} is zero and {@code allowsCoreThreadTimeOut}
     * @see #getKeepAliveTime()
1694      */
1695     void setKeepAliveTime(Duration time) {
1696         long keepAliveTime = time.total!(TimeUnit.HectoNanosecond)();
1697         if (keepAliveTime < 0)
1698             throw new IllegalArgumentException();
1699         if (keepAliveTime == 0 && allowsCoreThreadTimeOut())
1700             throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
1701         long delta = keepAliveTime - this.keepAliveTime;
1702         this.keepAliveTime = keepAliveTime;
1703         if (delta < 0)
1704             interruptIdleWorkers();
1705     }
1706 
1707     /**
1708      * Returns the thread keep-alive time, which is the amount of time
1709      * that threads may remain idle before being terminated.
1710      * Threads that wait this amount of time without processing a
1711      * task will be terminated if there are more than the core
1712      * number of threads currently in the pool, or if this pool
1713      * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
1714      *
     * @return the time limit, in hecto-nanoseconds
     * @see #setKeepAliveTime(Duration)
     */
    long getKeepAliveTime() {
        return keepAliveTime;
1722     }
1723 
1724     /* User-level queue utilities */
1725 
1726     /**
1727      * Returns the task queue used by this executor. Access to the
1728      * task queue is intended primarily for debugging and monitoring.
1729      * This queue may be in active use.  Retrieving the task queue
1730      * does not prevent queued tasks from executing.
1731      *
1732      * @return the task queue
1733      */
1734     BlockingQueue!(Runnable) getQueue() {
1735         return workQueue;
1736     }
1737 
1738     /**
1739      * Removes this task from the executor's internal queue if it is
1740      * present, thus causing it not to be run if it has not already
1741      * started.
1742      *
1743      * <p>This method may be useful as one part of a cancellation
1744      * scheme.  It may fail to remove tasks that have been converted
1745      * into other forms before being placed on the internal queue.
1746      * For example, a task entered using {@code submit} might be
1747      * converted into a form that maintains {@code Future} status.
1748      * However, in such cases, method {@link #purge} may be used to
1749      * remove those Futures that have been cancelled.
1750      *
1751      * @param task the task to remove
1752      * @return {@code true} if the task was removed
1753      */
1754     bool remove(Runnable task) {
1755         bool removed = workQueue.remove(task);
1756         tryTerminate(); // In case SHUTDOWN and now empty
1757         return removed;
1758     }
1759 
1760     /**
1761      * Tries to remove from the work queue all {@link Future}
1762      * tasks that have been cancelled. This method can be useful as a
1763      * storage reclamation operation, that has no other impact on
1764      * functionality. Cancelled tasks are never executed, but may
1765      * accumulate in work queues until worker threads can actively
1766      * remove them. Invoking this method instead tries to remove them now.
1767      * However, this method may fail to remove tasks in
1768      * the presence of interference by other threads.
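     *
     * <p>A sketch of the intended cancellation flow, assuming {@code submit}
     * wraps the task in a cancellable {@code Future} as described in
     * {@link #remove}:
     *
     * <pre> {@code
     * auto f = pool.submit(someTask); // someTask is illustrative
     * f.cancel(false);                // the cancelled wrapper stays queued ...
     * pool.purge();                   // ... until purge() reclaims it
     * }</pre>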
1769      */
1770     void purge() {
1771         BlockingQueue!(Runnable) q = workQueue;
1772         try {
1773             foreach(Runnable r; q) {
1774                 Future!Runnable f = cast(Future!Runnable) r;
1775                 if(f !is null && f.isCancelled())
1776                     q.remove(r);
1777             }
1778             // Iterator!(Runnable) it = q.iterator();
1779             // while (it.hasNext()) {
1780             //     Runnable r = it.next();
1781             //     if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
1782             //         it.remove();
1783             // }
1784         } catch (ConcurrentModificationException fallThrough) {
1785             // Take slow path if we encounter interference during traversal.
1786             // Make copy for traversal and call remove for cancelled entries.
1787             // The slow path is more likely to be O(N*N).
1788             foreach (Runnable r ; q.toArray()) {
1789                 Future!Runnable f = cast(Future!Runnable) r;
1790                 if(f !is null && f.isCancelled())
1791                     q.remove(r);
1792             }
1793         }
1794 
1795         tryTerminate(); // In case SHUTDOWN and now empty
1796     }
1797 
1798     /* Statistics */
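
    // The statistics getters below (getPoolSize, getActiveCount,
    // getLargestPoolSize, getTaskCount, getCompletedTaskCount) can be combined
    // into a lightweight monitoring snapshot, for example (a sketch; "pool" is
    // an existing executor):
    //
    //     string status = "poolSize=" ~ pool.getPoolSize().to!string()
    //         ~ ", active=" ~ pool.getActiveCount().to!string()
    //         ~ ", completed=" ~ pool.getCompletedTaskCount().to!string();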
1799 
1800     /**
1801      * Returns the current number of threads in the pool.
1802      *
1803      * @return the number of threads
1804      */
1805     int getPoolSize() {
1806         Mutex mainLock = this.mainLock;
1807         mainLock.lock();
1808         try {
1809             // Remove rare and surprising possibility of
1810             // isTerminated() && getPoolSize() > 0
1811             return runStateAtLeast(ctl, TIDYING) ? 0 : workers.size();
1812         } finally {
1813             mainLock.unlock();
1814         }
1815     }
1816 
1817     /**
1818      * Returns the approximate number of threads that are actively
1819      * executing tasks.
1820      *
1821      * @return the number of threads
1822      */
1823     int getActiveCount() {
1824         Mutex mainLock = this.mainLock;
1825         mainLock.lock();
1826         try {
1827             int n = 0;
1828             foreach (Worker w ; workers)
1829                 if (w.isLocked()) ++n;
1830             return n;
1831         } finally {
1832             mainLock.unlock();
1833         }
1834     }
1835 
1836     /**
1837      * Returns the largest number of threads that have ever
1838      * simultaneously been in the pool.
1839      *
1840      * @return the number of threads
1841      */
1842     int getLargestPoolSize() {
1843         Mutex mainLock = this.mainLock;
1844         mainLock.lock();
1845         try {
1846             return largestPoolSize;
1847         } finally {
1848             mainLock.unlock();
1849         }
1850     }
1851 
1852     /**
1853      * Returns the approximate total number of tasks that have ever been
1854      * scheduled for execution. Because the states of tasks and
1855      * threads may change dynamically during computation, the returned
1856      * value is only an approximation.
1857      *
1858      * @return the number of tasks
1859      */
1860     long getTaskCount() {
1861         Mutex mainLock = this.mainLock;
1862         mainLock.lock();
1863         try {
1864             long n = completedTaskCount;
1865             foreach (Worker w ; workers) {
1866                 n += w.completedTasks;
1867                 if (w.isLocked())
1868                     ++n;
1869             }
1870             return n + workQueue.size();
1871         } finally {
1872             mainLock.unlock();
1873         }
1874     }
1875 
1876     /**
1877      * Returns the approximate total number of tasks that have
1878      * completed execution. Because the states of tasks and threads
1879      * may change dynamically during computation, the returned value
1880      * is only an approximation, but one that does not ever decrease
1881      * across successive calls.
1882      *
1883      * @return the number of tasks
1884      */
1885     long getCompletedTaskCount() {
1886         Mutex mainLock = this.mainLock;
1887         mainLock.lock();
1888         try {
1889             long n = completedTaskCount;
1890             foreach (Worker w ; workers)
1891                 n += w.completedTasks;
1892             return n;
1893         } finally {
1894             mainLock.unlock();
1895         }
1896     }
1897 
1898     /**
1899      * Returns a string identifying this pool, as well as its state,
1900      * including indications of run state and estimated worker and
1901      * task counts.
1902      *
1903      * @return a string identifying this pool, as well as its state
1904      */
1905     override string toString() {
1906         long ncompleted;
1907         int nworkers, nactive;
1908         Mutex mainLock = this.mainLock;
1909         mainLock.lock();
1910         try {
1911             ncompleted = completedTaskCount;
1912             nactive = 0;
1913             nworkers = workers.size();
1914             foreach (Worker w ; workers) {
1915                 ncompleted += w.completedTasks;
1916                 if (w.isLocked())
1917                     ++nactive;
1918             }
1919         } finally {
1920             mainLock.unlock();
1921         }
1922         int c = ctl;
1923         string runState =
1924             isRunning(c) ? "Running" :
1925             runStateAtLeast(c, TERMINATED) ? "Terminated" :
1926             "Shutting down";
1927         return super.toString() ~
1928             "[" ~ runState ~
1929             ", pool size = " ~ nworkers.to!string() ~
1930             ", active threads = " ~ nactive.to!string() ~
1931             ", queued tasks = " ~ to!string(workQueue.size()) ~
1932             ", completed tasks = " ~ ncompleted.to!string() ~
1933             "]";
1934     }
1935 
1936     /* Extension hooks */
1937 
1938     /**
1939      * Method invoked prior to executing the given Runnable in the
1940      * given thread.  This method is invoked by thread {@code t} that
1941      * will execute task {@code r}, and may be used to re-initialize
1942      * ThreadLocals, or to perform logging.
1943      *
1944      * <p>This implementation does nothing, but may be customized in
1945      * subclasses. Note: To properly nest multiple overridings, subclasses
1946      * should generally invoke {@code super.beforeExecute} at the end of
1947      * this method.
1948      *
1949      * @param t the thread that will run task {@code r}
1950      * @param r the task that will be executed
1951      */
1952     protected void beforeExecute(Thread t, Runnable r) { }
1953 
1954     /**
1955      * Method invoked upon completion of execution of the given Runnable.
1956      * This method is invoked by the thread that executed the task. If
1957      * non-null, the Throwable is the uncaught {@code RuntimeException}
1958      * or {@code Error} that caused execution to terminate abruptly.
1959      *
1960      * <p>This implementation does nothing, but may be customized in
1961      * subclasses. Note: To properly nest multiple overridings, subclasses
1962      * should generally invoke {@code super.afterExecute} at the
1963      * beginning of this method.
1964      *
1965      * <p><b>Note:</b> When actions are enclosed in tasks (such as
1966      * {@link FutureTask}) either explicitly or via methods such as
1967      * {@code submit}, these task objects catch and maintain
1968      * computational exceptions, and so they do not cause abrupt
1969      * termination, and the internal exceptions are <em>not</em>
1970      * passed to this method. If you would like to trap both kinds of
1971      * failures in this method, you can further probe for such cases,
1972      * as in this sample subclass that prints either the direct cause
1973      * or the underlying exception if a task has been aborted:
1974      *
1975      * <pre> {@code
1976      * class ExtendedExecutor : ThreadPoolExecutor {
1977      *   // ...
     *   override protected void afterExecute(Runnable r, Throwable t) {
     *     super.afterExecute(r, t);
     *     Future!Runnable f = cast(Future!Runnable) r;
     *     if (t is null && f !is null && f.isDone()) {
     *       try {
     *         auto result = f.get();
1985      *       } catch (CancellationException ce) {
1986      *         t = ce;
1987      *       } catch (ExecutionException ee) {
1988      *         t = ee.getCause();
1989      *       } catch (InterruptedException ie) {
1990      *         // ignore/reset
1991      *         Thread.getThis().interrupt();
1992      *       }
1993      *     }
1994      *     if (t !is null)
     *       writeln(t);
1996      *   }
1997      * }}</pre>
1998      *
1999      * @param r the runnable that has completed
2000      * @param t the exception that caused termination, or null if
2001      * execution completed normally
2002      */
2003     protected void afterExecute(Runnable r, Throwable t) { }
2004 
2005     /**
2006      * Method invoked when the Executor has terminated.  Default
2007      * implementation does nothing. Note: To properly nest multiple
2008      * overridings, subclasses should generally invoke
2009      * {@code super.terminated} within this method.
2010      */
2011     protected void terminated() { }
2012 }
2013 
2014 
2015 /**
2016  * A handler for tasks that cannot be executed by a {@link ThreadPoolExecutor}.
2017  *
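 * <p>Besides the predefined policies below, callers can supply their own
 * implementation. A minimal sketch that logs and drops the rejected task
 * (the logging call is illustrative only):
 *
 * <pre> {@code
 * class LoggingDiscardPolicy : RejectedExecutionHandler {
 *     void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
 *         import std.stdio : writeln;
 *         writeln("Task rejected: " ~ (cast(Object)r).toString());
 *     }
 * }}</pre>
 *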
2018  * @author Doug Lea
2019  */
2020 interface RejectedExecutionHandler {
2021 
2022     /**
2023      * Method that may be invoked by a {@link ThreadPoolExecutor} when
2024      * {@link ThreadPoolExecutor#execute execute} cannot accept a
2025      * task.  This may occur when no more threads or queue slots are
2026      * available because their bounds would be exceeded, or upon
2027      * shutdown of the Executor.
2028      *
2029      * <p>In the absence of other alternatives, the method may throw
2030      * an unchecked {@link RejectedExecutionException}, which will be
2031      * propagated to the caller of {@code execute}.
2032      *
2033      * @param r the runnable task requested to be executed
2034      * @param executor the executor attempting to execute this task
2035      * @throws RejectedExecutionException if there is no remedy
2036      */
2037     void rejectedExecution(Runnable r, ThreadPoolExecutor executor);
2038 }
2039 
2040 /* Predefined RejectedExecutionHandlers */
2041 
2042 /**
2043  * A handler for rejected tasks that runs the rejected task
2044  * directly in the calling thread of the {@code execute} method,
2045  * unless the executor has been shut down, in which case the task
2046  * is discarded.
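 *
 * <p>This policy provides a simple form of feedback control: when the pool is
 * saturated, the submitting thread runs the task itself, which slows the rate
 * of new submissions. For example (a sketch):
 *
 * <pre> {@code
 * pool.setRejectedExecutionHandler(new CallerRunsPolicy());
 * }</pre>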
2047  */
2048 class CallerRunsPolicy : RejectedExecutionHandler {
2049     /**
2050      * Creates a {@code CallerRunsPolicy}.
2051      */
2052     this() { }
2053 
2054     /**
2055      * Executes task r in the caller's thread, unless the executor
2056      * has been shut down, in which case the task is discarded.
2057      *
2058      * @param r the runnable task requested to be executed
2059      * @param e the executor attempting to execute this task
2060      */
2061     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2062         if (!e.isShutdown()) {
2063             r.run();
2064         }
2065     }
2066 }
2067 
2068 /**
2069  * A handler for rejected tasks that throws a
2070  * {@link RejectedExecutionException}.
2071  *
2072  * This is the default handler for {@link ThreadPoolExecutor} and
2073  * {@link ScheduledThreadPoolExecutor}.
2074  */
2075 class AbortPolicy : RejectedExecutionHandler {
2076     /**
2077      * Creates an {@code AbortPolicy}.
2078      */
2079     this() { }
2080 
2081     /**
2082      * Always throws RejectedExecutionException.
2083      *
2084      * @param r the runnable task requested to be executed
2085      * @param e the executor attempting to execute this task
2086      * @throws RejectedExecutionException always
2087      */
2088     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2089         throw new RejectedExecutionException("Task " ~ (cast(Object)r).toString() ~
2090                                              " rejected from " ~
2091                                              e.toString());
2092     }
2093 }
2094 
2095 /**
2096  * A handler for rejected tasks that silently discards the
2097  * rejected task.
2098  */
2099 class DiscardPolicy : RejectedExecutionHandler {
2100     /**
2101      * Creates a {@code DiscardPolicy}.
2102      */
2103     this() { }
2104 
2105     /**
2106      * Does nothing, which has the effect of discarding task r.
2107      *
2108      * @param r the runnable task requested to be executed
2109      * @param e the executor attempting to execute this task
2110      */
2111     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2112     }
2113 }
2114 
/**
 * A handler for rejected tasks that discards the oldest unhandled
 * request and then retries {@code execute}, unless the executor
 * is shut down, in which case the task is discarded.
 */
2120 class DiscardOldestPolicy : RejectedExecutionHandler {
    /**
     * Creates a {@code DiscardOldestPolicy}.
     */
2124     this() { }
2125 
    /**
     * Obtains and ignores the next task that the executor
     * would otherwise execute, if one is immediately available,
     * and then retries execution of task r, unless the executor
     * is shut down, in which case task r is instead discarded.
     *
     * @param r the runnable task requested to be executed
     * @param e the executor attempting to execute this task
     */
2135     void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
2136         if (!e.isShutdown()) {
2137             e.getQueue().poll();
2138             e.execute(r);
2139         }
2140     }
2141 }