/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

module hunt.concurrency.ConcurrentHashMap;

import hunt.concurrency.ConcurrentMap;

// import java.io.ObjectStreamField;
// import java.io.Serializable;
// import java.lang.reflect.ParameterizedType;
// import java.lang.reflect.Type;
import hunt.collection.AbstractMap;
// import hunt.collection.Arrays;
import hunt.collection.Collection;
import hunt.collection.Enumeration;
import hunt.collection.HashMap;
// import hunt.collection.Hashtable;
import hunt.collection.Iterator;
import hunt.collection.Map;
import hunt.collection.Set;
// import hunt.collection.Spliterator;
import hunt.Exceptions;

import hunt.Functions;

// import hunt.collection.concurrent.atomic.AtomicReference;
// import hunt.collection.concurrent.locks.LockSupport;
// import hunt.collection.concurrent.locks.ReentrantLock;
// import java.util.function.BiConsumer;
// import java.util.function.BiFunction;
// import java.util.function.Consumer;
// import java.util.function.DoubleBinaryOperator;
// import java.util.function.Function;
// import java.util.function.IntBinaryOperator;
// import java.util.function.LongBinaryOperator;
// import java.util.function.Predicate;
// import java.util.function.ToDoubleBiFunction;
// import java.util.function.ToDoubleFunction;
// import java.util.function.ToIntBiFunction;
// import java.util.function.ToIntFunction;
// import java.util.function.ToLongBiFunction;
// import java.util.function.ToLongFunction;
// import java.util.stream.Stream;
// import jdk.internal.misc.Unsafe;

// /**
//  * A hash table supporting full concurrency of retrievals and
//  * high expected concurrency for updates. This class obeys the
//  * same functional specification as {@link java.util.Hashtable}, and
//  * includes versions of methods corresponding to each method of
//  * {@code Hashtable}. However, even though all operations are
//  * thread-safe, retrieval operations do <em>not</em> entail locking,
//  * and there is <em>not</em> any support for locking the entire table
//  * in a way that prevents all access. This class is fully
//  * interoperable with {@code Hashtable} in programs that rely on its
//  * thread safety but not on its synchronization details.
//  *
//  * <p>Retrieval operations (including {@code get}) generally do not
//  * block, so may overlap with update operations (including {@code put}
//  * and {@code remove}). Retrievals reflect the results of the most
//  * recently <em>completed</em> update operations holding upon their
//  * onset. (More formally, an update operation for a given key bears a
//  * <em>happens-before</em> relation with any (non-null) retrieval for
//  * that key reporting the updated value.) For aggregate operations
//  * such as {@code putAll} and {@code clear}, concurrent retrievals may
//  * reflect insertion or removal of only some entries. Similarly,
//  * Iterators, Spliterators and Enumerations return elements reflecting the
//  * state of the hash table at some point at or since the creation of the
//  * iterator/enumeration. They do <em>not</em> throw {@link
//  * java.util.ConcurrentModificationException ConcurrentModificationException}.
//  * However, iterators are designed to be used by only one thread at a time.
//  * Bear in mind that the results of aggregate status methods including
//  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
//  * useful only when a map is not undergoing concurrent updates in other threads.
//  * Otherwise the results of these methods reflect transient states
//  * that may be adequate for monitoring or estimation purposes, but not
//  * for program control.
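//  *
//  * <p>(Editorial sketch, not part of the original doc: the happens-before
//  * guarantee above, illustrated in D pseudo-usage. It assumes this
//  * commented-out port were enabled, with {@code Object} values.)
//  * <pre>{@code
//  * // thread A:
//  * map.put("answer", obj);          // update completes
//  * // thread B:
//  * Object v = map.get("answer");    // any non-null result sees A's write
//  * if (v !is null) assert(v is obj);
//  * }</pre>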
//  *
//  * <p>The table is dynamically expanded when there are too many
//  * collisions (i.e., keys that have distinct hash codes but fall into
//  * the same slot modulo the table size), with the expected average
//  * effect of maintaining roughly two bins per mapping (corresponding
//  * to a 0.75 load factor threshold for resizing). There may be much
//  * variance around this average as mappings are added and removed, but
//  * overall, this maintains a commonly accepted time/space tradeoff for
//  * hash tables. However, resizing this or any other kind of hash
//  * table may be a relatively slow operation. When possible, it is a
//  * good idea to provide a size estimate as an optional {@code
//  * initialCapacity} constructor argument. An additional optional
//  * {@code loadFactor} constructor argument provides a further means of
//  * customizing initial table capacity by specifying the table density
//  * to be used in calculating the amount of space to allocate for the
//  * given number of elements. Also, for compatibility with previous
//  * versions of this class, constructors may optionally specify an
//  * expected {@code concurrencyLevel} as an additional hint for
//  * internal sizing. Note that using many keys with exactly the same
//  * {@code hashCode()} is a sure way to slow down performance of any
//  * hash table. To ameliorate impact, when keys are {@link Comparable},
//  * this class may use comparison order among keys to help break ties.
//  *
//  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
//  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
//  * (using {@link #keySet(Object)}) when only keys are of interest, and the
//  * mapped values are (perhaps transiently) not used or all take the
//  * same mapping value.
//  *
//  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
//  * form of histogram or multiset) by using {@link
//  * java.util.concurrent.atomic.LongAdder} values and initializing via
//  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
//  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
//  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
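//  *
//  * <p>(Editorial sketch of that idiom in D, under assumptions: this
//  * commented-out port is enabled and {@code computeIfAbsent} accepts a
//  * D delegate; {@code shared(long)*} with {@code core.atomic} stands in
//  * for Java's {@code LongAdder}, which has no counterpart here.)
//  * <pre>{@code
//  * import core.atomic : atomicOp;
//  *
//  * void count(ConcurrentHashMap!(string, shared(long)*) freqs, string key) {
//  *     // create the per-key counter cell on first use
//  *     auto cell = freqs.computeIfAbsent(key, (string k) => new shared(long));
//  *     atomicOp!"+="(*cell, 1);    // atomic increment, no map-level lock
//  * }
//  * }</pre>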
//  *
//  * <p>This class and its views and iterators implement all of the
//  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
//  * interfaces.
//  *
//  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
//  * does <em>not</em> allow {@code null} to be used as a key or value.
//  *
//  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
//  * operations that, unlike most {@link Stream} methods, are designed
//  * to be safely, and often sensibly, applied even with maps that are
//  * being concurrently updated by other threads; for example, when
//  * computing a snapshot summary of the values in a shared registry.
//  * There are three kinds of operation, each with four forms, accepting
//  * functions with keys, values, entries, and (key, value) pairs as
//  * arguments and/or return values. Because the elements of a
//  * ConcurrentHashMap are not ordered in any particular way, and may be
//  * processed in different orders in different parallel executions, the
//  * correctness of supplied functions should not depend on any
//  * ordering, or on any other objects or values that may transiently
//  * change while computation is in progress; and except for forEach
//  * actions, should ideally be side-effect-free. Bulk operations on
//  * {@link Map.Entry} objects do not support method {@code setValue}.
//  *
//  * <ul>
//  * <li>forEach: Performs a given action on each element.
//  * A variant form applies a given transformation on each element
//  * before performing the action.
//  *
//  * <li>search: Returns the first available non-null result of
//  * applying a given function on each element; skipping further
//  * search when a result is found.
//  *
//  * <li>reduce: Accumulates each element. The supplied reduction
//  * function cannot rely on ordering (more formally, it should be
//  * both associative and commutative). There are five variants:
//  *
//  * <ul>
//  *
//  * <li>Plain reductions. (There is not a form of this method for
//  * (key, value) function arguments since there is no corresponding
//  * return type.)
//  *
//  * <li>Mapped reductions that accumulate the results of a given
//  * function applied to each element.
//  *
//  * <li>Reductions to scalar doubles, longs, and ints, using a
//  * given basis value.
//  *
//  * </ul>
//  * </ul>
//  *
//  * <p>These bulk operations accept a {@code parallelismThreshold}
//  * argument. Methods proceed sequentially if the current map size is
//  * estimated to be less than the given threshold. Using a value of
//  * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
//  * of {@code 1} results in maximal parallelism by partitioning into
//  * enough subtasks to fully utilize the {@link
//  * ForkJoinPool#commonPool()} that is used for all parallel
//  * computations. Normally, you would initially choose one of these
//  * extreme values, and then measure performance of using in-between
//  * values that trade off overhead versus throughput.
//  *
//  * <p>The concurrency properties of bulk operations follow
//  * from those of ConcurrentHashMap: Any non-null result returned
//  * from {@code get(key)} and related access methods bears a
//  * happens-before relation with the associated insertion or
//  * update. The result of any bulk operation reflects the
//  * composition of these per-element relations (but is not
//  * necessarily atomic with respect to the map as a whole unless it
//  * is somehow known to be quiescent). Conversely, because keys
//  * and values in the map are never null, null serves as a reliable
//  * atomic indicator of the current lack of any result. To
//  * maintain this property, null serves as an implicit basis for
//  * all non-scalar reduction operations. For the double, long, and
//  * int versions, the basis should be one that, when combined with
//  * any other value, returns that other value (more formally, it
//  * should be the identity element for the reduction). Most common
//  * reductions have these properties; for example, computing a sum
//  * with basis 0 or a minimum with basis MAX_VALUE.
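//  *
//  * <p>(Editorial, shape-only sketch of one such reduction in D. It is
//  * hypothetical: this port does not define the bulk methods yet, and the
//  * signature merely mirrors the Java {@code reduceValuesToLong} form.)
//  * <pre>{@code
//  * // Sum all values; run sequentially below 500 mappings.
//  * long total = map.reduceValuesToLong(500L,
//  *         (v) => cast(long) v,     // transformer
//  *         0L,                      // identity basis for +
//  *         (a, b) => a + b);        // associative, commutative reducer
//  * }</pre>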
//  *
//  * <p>Search and transformation functions provided as arguments
//  * should similarly return null to indicate the lack of any result
//  * (in which case it is not used). In the case of mapped
//  * reductions, this also enables transformations to serve as
//  * filters, returning null (or, in the case of primitive
//  * specializations, the identity basis) if the element should not
//  * be combined. You can create compound transformations and
//  * filterings by composing them yourself under this "null means
//  * there is nothing there now" rule before using them in search or
//  * reduce operations.
//  *
//  * <p>Methods accepting and/or returning Entry arguments maintain
//  * key-value associations. They may be useful for example when
//  * finding the key for the greatest value. Note that "plain" Entry
//  * arguments can be supplied using {@code new
//  * AbstractMap.SimpleEntry(k,v)}.
//  *
//  * <p>Bulk operations may complete abruptly, throwing an
//  * exception encountered in the application of a supplied
//  * function. Bear in mind when handling such exceptions that other
//  * concurrently executing functions could also have thrown
//  * exceptions, or would have done so if the first exception had
//  * not occurred.
//  *
//  * <p>Speedups for parallel compared to sequential forms are common
//  * but not guaranteed. Parallel operations involving brief functions
//  * on small maps may execute more slowly than sequential forms if the
//  * underlying work to parallelize the computation is more expensive
//  * than the computation itself. Similarly, parallelization may not
//  * lead to much actual parallelism if all processors are busy
//  * performing unrelated tasks.
//  *
//  * <p>All arguments to all task methods must be non-null.
//  *
//  * <p>This class is a member of the
//  * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
//  * Java Collections Framework</a>.
//  *
//  * @author Doug Lea
//  * @param <K> the type of keys maintained by this map
//  * @param <V> the type of mapped values
//  */
// public class ConcurrentHashMap(K, V) : AbstractMap!(K, V), ConcurrentMap!(K, V) { // , Serializable

//     /*
//      * Overview:
//      *
//      * The primary design goal of this hash table is to maintain
//      * concurrent readability (typically method get(), but also
//      * iterators and related methods) while minimizing update
//      * contention. Secondary goals are to keep space consumption about
//      * the same or better than java.util.HashMap, and to support high
//      * initial insertion rates on an empty table by many threads.
//      *
//      * This map usually acts as a binned (bucketed) hash table. Each
//      * key-value mapping is held in a Node. Most nodes are instances
//      * of the basic Node class with hash, key, value, and next
//      * fields. However, various subclasses exist: TreeNodes are
//      * arranged in balanced trees, not lists. TreeBins hold the roots
//      * of sets of TreeNodes. ForwardingNodes are placed at the heads
//      * of bins during resizing. ReservationNodes are used as
//      * placeholders while establishing values in computeIfAbsent and
//      * related methods. The types TreeBin, ForwardingNode, and
//      * ReservationNode do not hold normal user keys, values, or
//      * hashes, and are readily distinguishable during search etc
//      * because they have negative hash fields and null key and value
//      * fields. (These special nodes are either uncommon or transient,
//      * so the impact of carrying around some unused fields is
//      * insignificant.)
//      *
//      * The table is lazily initialized to a power-of-two size upon the
//      * first insertion. Each bin in the table normally contains a
//      * list of Nodes (most often, the list has only zero or one Node).
//      * Table accesses require volatile/atomic reads, writes, and
//      * CASes. Because there is no other way to arrange this without
//      * adding further indirections, we use intrinsics
//      * (jdk.internal.misc.Unsafe) operations.
//      *
//      * We use the top (sign) bit of Node hash fields for control
//      * purposes -- it is available anyway because of addressing
//      * constraints. Nodes with negative hash fields are specially
//      * handled or ignored in map methods.
//      *
//      * Insertion (via put or its variants) of the first node in an
//      * empty bin is performed by just CASing it to the bin. This is
//      * by far the most common case for put operations under most
//      * key/hash distributions. Other update operations (insert,
//      * delete, and replace) require locks. We do not want to waste
//      * the space required to associate a distinct lock object with
//      * each bin, so instead use the first node of a bin list itself as
//      * a lock. Locking support for these locks relies on builtin
//      * "synchronized" monitors.
//      *
//      * Using the first node of a list as a lock does not by itself
//      * suffice though: When a node is locked, any update must first
//      * validate that it is still the first node after locking it, and
//      * retry if not. Because new nodes are always appended to lists,
//      * once a node is first in a bin, it remains first until deleted
//      * or the bin becomes invalidated (upon resizing).
//      *
//      * The main disadvantage of per-bin locks is that other update
//      * operations on other nodes in a bin list protected by the same
//      * lock can stall, for example when user equals() or mapping
//      * functions take a long time. However, statistically, under
//      * random hash codes, this is not a common problem. Ideally, the
//      * frequency of nodes in bins follows a Poisson distribution
//      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
//      * parameter of about 0.5 on average, given the resizing threshold
//      * of 0.75, although with a large variance because of resizing
//      * granularity. Ignoring variance, the expected occurrences of
//      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
//      * first values are:
//      *
//      * 0:    0.60653066
//      * 1:    0.30326533
//      * 2:    0.07581633
//      * 3:    0.01263606
//      * 4:    0.00157952
//      * 5:    0.00015795
//      * 6:    0.00001316
//      * 7:    0.00000094
//      * 8:    0.00000006
//      * more: less than 1 in ten million
//      *
//      * Lock contention probability for two threads accessing distinct
//      * elements is roughly 1 / (8 * #elements) under random hashes.
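//      *
//      * (Editorial aside, not in the original: the figures above can be
//      * reproduced with a few lines of standalone D, using the recurrence
//      * P(k+1) = P(k) * 0.5 / (k + 1):
//      *
//      *     import std.math : exp;
//      *     import std.stdio : writefln;
//      *
//      *     void main() {
//      *         double p = exp(-0.5);          // P(0) for Poisson(0.5)
//      *         foreach (k; 0 .. 9) {
//      *             writefln("%d: %.8f", k, p);
//      *             p *= 0.5 / (k + 1);        // step to P(k+1)
//      *         }
//      *     }
//      *
//      * which prints exactly the table shown.)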
//      *
//      * Actual hash code distributions encountered in practice
//      * sometimes deviate significantly from uniform randomness. This
//      * includes the case when N > (1<<30), so some keys MUST collide.
//      * Similarly for dumb or hostile usages in which multiple keys are
//      * designed to have identical hash codes or ones that differ only
//      * in masked-out high bits. So we use a secondary strategy that
//      * applies when the number of nodes in a bin exceeds a
//      * threshold. These TreeBins use a balanced tree to hold nodes (a
//      * specialized form of red-black trees), bounding search time to
//      * O(log N). Each search step in a TreeBin is at least twice as
//      * slow as in a regular list, but given that N cannot exceed
//      * (1<<64) (before running out of addresses) this bounds search
//      * steps, lock hold times, etc, to reasonable constants (roughly
//      * 100 nodes inspected per operation worst case) so long as keys
//      * are Comparable (which is very common -- String, Long, etc).
//      * TreeBin nodes (TreeNodes) also maintain the same "next"
//      * traversal pointers as regular nodes, so can be traversed in
//      * iterators in the same way.
//      *
//      * The table is resized when occupancy exceeds a percentage
//      * threshold (nominally, 0.75, but see below). Any thread
//      * noticing an overfull bin may assist in resizing after the
//      * initiating thread allocates and sets up the replacement array.
//      * However, rather than stalling, these other threads may proceed
//      * with insertions etc. The use of TreeBins shields us from the
//      * worst case effects of overfilling while resizes are in
//      * progress. Resizing proceeds by transferring bins, one by one,
//      * from the table to the next table. However, threads claim small
//      * blocks of indices to transfer (via field transferIndex) before
//      * doing so, reducing contention. A generation stamp in field
//      * sizeCtl ensures that resizings do not overlap. Because we are
//      * using power-of-two expansion, the elements from each bin must
//      * either stay at same index, or move with a power of two
//      * offset. We eliminate unnecessary node creation by catching
//      * cases where old nodes can be reused because their next fields
//      * won't change. On average, only about one-sixth of them need
//      * cloning when a table doubles. The nodes they replace will be
//      * garbage collectible as soon as they are no longer referenced by
//      * any reader thread that may be in the midst of concurrently
//      * traversing table. Upon transfer, the old table bin contains
//      * only a special forwarding node (with hash field "MOVED") that
//      * contains the next table as its key. On encountering a
//      * forwarding node, access and update operations restart, using
//      * the new table.
//      *
//      * Each bin transfer requires its bin lock, which can stall
//      * waiting for locks while resizing. However, because other
//      * threads can join in and help resize rather than contend for
//      * locks, average aggregate waits become shorter as resizing
//      * progresses. The transfer operation must also ensure that all
//      * accessible bins in both the old and new table are usable by any
//      * traversal. This is arranged in part by proceeding from the
//      * last bin (table.length - 1) up towards the first. Upon seeing
//      * a forwarding node, traversals (see class Traverser) arrange to
//      * move to the new table without revisiting nodes. To ensure that
//      * no intervening nodes are skipped even when moved out of order,
//      * a stack (see class TableStack) is created on first encounter of
//      * a forwarding node during a traversal, to maintain its place if
//      * later processing the current table. The need for these
//      * save/restore mechanics is relatively rare, but when one
//      * forwarding node is encountered, typically many more will be.
//      * So Traversers use a simple caching scheme to avoid creating so
//      * many new TableStack nodes. (Thanks to Peter Levart for
//      * suggesting use of a stack here.)
//      *
//      * The traversal scheme also applies to partial traversals of
//      * ranges of bins (via an alternate Traverser constructor)
//      * to support partitioned aggregate operations. Also, read-only
//      * operations give up if ever forwarded to a null table, which
//      * provides support for shutdown-style clearing, which is also not
//      * currently implemented.
//      *
//      * Lazy table initialization minimizes footprint until first use,
//      * and also avoids resizings when the first operation is from a
//      * putAll, constructor with map argument, or deserialization.
//      * These cases attempt to override the initial capacity settings,
//      * but harmlessly fail to take effect in cases of races.
//      *
//      * The element count is maintained using a specialization of
//      * LongAdder. We need to incorporate a specialization rather than
//      * just use a LongAdder in order to access implicit
//      * contention-sensing that leads to creation of multiple
//      * CounterCells. The counter mechanics avoid contention on
//      * updates but can encounter cache thrashing if read too
//      * frequently during concurrent access. To avoid reading so often,
//      * resizing under contention is attempted only upon adding to a
//      * bin already holding two or more nodes. Under uniform hash
//      * distributions, the probability of this occurring at threshold
//      * is around 13%, meaning that only about 1 in 8 puts check
//      * threshold (and after resizing, many fewer do so).
//      *
//      * TreeBins use a special form of comparison for search and
//      * related operations (which is the main reason we cannot use
//      * existing collections such as TreeMaps). TreeBins contain
//      * Comparable elements, but may contain others, as well as
//      * elements that are Comparable but not necessarily Comparable for
//      * the same T, so we cannot invoke compareTo among them. To handle
//      * this, the tree is ordered primarily by hash value, then by
//      * Comparable.compareTo order if applicable. On lookup at a node,
//      * if elements are not comparable or compare as 0 then both left
//      * and right children may need to be searched in the case of tied
//      * hash values. (This corresponds to the full list search that
//      * would be necessary if all elements were non-Comparable and had
//      * tied hashes.) On insertion, to keep a total ordering (or as
//      * close as is required here) across rebalancings, we compare
//      * classes and identityHashCodes as tie-breakers. The red-black
//      * balancing code is updated from pre-jdk-collections
//      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
//      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
//      * Algorithms" (CLR).
//      *
//      * TreeBins also require an additional locking mechanism. While
//      * list traversal is always possible by readers even during
//      * updates, tree traversal is not, mainly because of tree-rotations
//      * that may change the root node and/or its linkages. TreeBins
//      * include a simple read-write lock mechanism parasitic on the
//      * main bin-synchronization strategy: Structural adjustments
//      * associated with an insertion or removal are already bin-locked
//      * (and so cannot conflict with other writers) but must wait for
//      * ongoing readers to finish. Since there can be only one such
//      * waiter, we use a simple scheme using a single "waiter" field to
//      * block writers. However, readers need never block. If the root
//      * lock is held, they proceed along the slow traversal path (via
//      * next-pointers) until the lock becomes available or the list is
//      * exhausted, whichever comes first. These cases are not fast, but
//      * maximize aggregate expected throughput.
//      *
//      * Maintaining API and serialization compatibility with previous
//      * versions of this class introduces several oddities. Mainly: We
//      * leave untouched but unused constructor arguments referring to
//      * concurrencyLevel. We accept a loadFactor constructor argument,
//      * but apply it only to initial table capacity (which is the only
//      * time that we can guarantee to honor it.) We also declare an
//      * unused "Segment" class that is instantiated in minimal form
//      * only when serializing.
//      *
//      * Also, solely for compatibility with previous versions of this
//      * class, it extends AbstractMap, even though all of its methods
//      * are overridden, so it is just useless baggage.
//      *
//      * This file is organized to make things a little easier to follow
//      * while reading than they might otherwise: First the main static
//      * declarations and utilities, then fields, then main public
//      * methods (with a few factorings of multiple public methods into
//      * internal ones), then sizing methods, trees, traversers, and
//      * bulk operations.
//      */

//     /* ---------------- Constants -------------- */

//     /**
//      * The largest possible table capacity. This value must be
//      * exactly 1<<30 to stay within Java array allocation and indexing
//      * bounds for power of two table sizes, and is further required
//      * because the top two bits of 32bit hash fields are used for
//      * control purposes.
//      */
//     private enum int MAXIMUM_CAPACITY = 1 << 30;

//     /**
//      * The default initial table capacity. Must be a power of 2
//      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
//      */
//     private enum int DEFAULT_CAPACITY = 16;

//     /**
//      * The largest possible (non-power of two) array size.
//      * Needed by toArray and related methods.
//      */
//     enum int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

//     /**
//      * The default concurrency level for this table. Unused but
//      * defined for compatibility with previous versions of this class.
//      */
//     private enum int DEFAULT_CONCURRENCY_LEVEL = 16;

//     /**
//      * The load factor for this table. Overrides of this value in
//      * constructors affect only the initial table capacity. The
//      * actual floating point value isn't normally used -- it is
//      * simpler to use expressions such as {@code n - (n >>> 2)} for
//      * the associated resizing threshold.
//      */
//     private enum float LOAD_FACTOR = 0.75f;
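
//     // Editorial aside: the {@code n - (n >>> 2)} idiom above computes
//     // floor(0.75 * n) with integer arithmetic alone. A standalone sketch:
//     //
//     //     static int resizeThreshold(int n) {
//     //         return n - (n >>> 2);   // n - n/4 == 3n/4
//     //     }
//     //     // resizeThreshold(16) == 12, resizeThreshold(64) == 48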

//     /**
//      * The bin count threshold for using a tree rather than list for a
//      * bin. Bins are converted to trees when adding an element to a
//      * bin with at least this many nodes. The value must be greater
//      * than 2, and should be at least 8 to mesh with assumptions in
//      * tree removal about conversion back to plain bins upon
//      * shrinkage.
//      */
//     enum int TREEIFY_THRESHOLD = 8;

//     /**
//      * The bin count threshold for untreeifying a (split) bin during a
//      * resize operation. Should be less than TREEIFY_THRESHOLD, and at
//      * most 6 to mesh with shrinkage detection under removal.
//      */
//     enum int UNTREEIFY_THRESHOLD = 6;

//     /**
//      * The smallest table capacity for which bins may be treeified.
//      * (Otherwise the table is resized if too many nodes in a bin.)
//      * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
//      * conflicts between resizing and treeification thresholds.
//      */
//     enum int MIN_TREEIFY_CAPACITY = 64;

//     /**
//      * Minimum number of rebinnings per transfer step. Ranges are
//      * subdivided to allow multiple resizer threads. This value
//      * serves as a lower bound to avoid resizers encountering
//      * excessive memory contention. The value should be at least
//      * DEFAULT_CAPACITY.
//      */
//     private enum int MIN_TRANSFER_STRIDE = 16;

//     /**
//      * The number of bits used for generation stamp in sizeCtl.
//      * Must be at least 6 for 32bit arrays.
//      */
//     private enum int RESIZE_STAMP_BITS = 16;

//     /**
//      * The maximum number of threads that can help resize.
//      * Must fit in 32 - RESIZE_STAMP_BITS bits.
//      */
//     private enum int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;

//     /**
//      * The bit shift for recording size stamp in sizeCtl.
//      */
//     private enum int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;

//     /*
//      * Encodings for Node hash fields. See above for explanation.
//      */
//     enum int MOVED     = -1;          // hash for forwarding nodes
//     enum int TREEBIN   = -2;          // hash for roots of trees
//     enum int RESERVED  = -3;          // hash for transient reservations
//     enum int HASH_BITS = 0x7fffffff;  // usable bits of normal node hash

//     /** Number of CPUS, to place bounds on some sizings */
//     enum int NCPU = Runtime.getRuntime().availableProcessors();

//     /**
//      * Serialized pseudo-fields, provided only for jdk7 compatibility.
//      * @serialField segments Segment[]
//      *   The segments, each of which is a specialized hash table.
//      * @serialField segmentMask int
//      *   Mask value for indexing into segments. The upper bits of a
//      *   key's hash code are used to choose the segment.
//      * @serialField segmentShift int
//      *   Shift value for indexing within segments.
//      */
//     private static final ObjectStreamField[] serialPersistentFields = {
//         new ObjectStreamField("segments", Segment[].class),
//         new ObjectStreamField("segmentMask", Integer.TYPE),
//         new ObjectStreamField("segmentShift", Integer.TYPE),
//     };

//     /* ---------------- Nodes -------------- */

//     /**
//      * Key-value entry. This class is never exported out as a
//      * user-mutable Map.Entry (i.e., one supporting setValue; see
//      * MapEntry below), but can be used for read-only traversals used
//      * in bulk tasks. Subclasses of Node with a negative hash field
//      * are special, and contain null keys and values (but are never
//      * exported). Otherwise, keys and vals are never null.
//      */
//     static class Node!(K, V) implements Map.Entry!(K, V) {
//         final int hash;
//         final K key;
//         volatile V val;
//         volatile Node!(K, V) next;

//         Node(int hash, K key, V val) {
//             this.hash = hash;
//             this.key = key;
//             this.val = val;
//         }

//         Node(int hash, K key, V val, Node!(K, V) next) {
//             this(hash, key, val);
//             this.next = next;
//         }

//         public final K getKey()     { return key; }
//         public final V getValue()   { return val; }
//         public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
//         public final String toString() {
//             return Helpers.mapEntryToString(key, val);
//         }
//         public final V setValue(V value) {
//             throw new UnsupportedOperationException();
//         }

//         public final boolean equals(Object o) {
//             Object k, v, u; Map.Entry<?,?> e;
//             return ((o instanceof Map.Entry) &&
//                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
//                     (v = e.getValue()) != null &&
//                     (k == key || k.equals(key)) &&
//                     (v == (u = val) || v.equals(u)));
//         }

//         /**
//          * Virtualized support for map.get(); overridden in subclasses.
//          */
//         Node!(K, V) find(int h, Object k) {
//             Node!(K, V) e = this;
//             if (k != null) {
//                 do {
//                     K ek;
//                     if (e.hash == h &&
//                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
//                         return e;
//                 } while ((e = e.next) != null);
//             }
//             return null;
//         }
//     }

//     /* ---------------- Static utilities -------------- */

//     /**
//      * Spreads (XORs) higher bits of hash to lower and also forces top
//      * bit to 0. Because the table uses power-of-two masking, sets of
//      * hashes that vary only in bits above the current mask will
//      * always collide. (Among known examples are sets of Float keys
//      * holding consecutive whole numbers in small tables.) So we
//      * apply a transform that spreads the impact of higher bits
//      * downward. There is a tradeoff between speed, utility, and
//      * quality of bit-spreading. Because many common sets of hashes
//      * are already reasonably distributed (so don't benefit from
//      * spreading), and because we use trees to handle large sets of
//      * collisions in bins, we just XOR some shifted bits in the
//      * cheapest possible way to reduce systematic lossage, as well as
//      * to incorporate impact of the highest bits that would otherwise
//      * never be used in index calculations because of table bounds.
//      */
//     static final int spread(int h) {
//         return (h ^ (h >>> 16)) & HASH_BITS;
//     }

//     /**
//      * Returns a power of two table size for the given desired capacity.
//      * See Hackers Delight, sec 3.2
//      */
//     private static final int tableSizeFor(int c) {
//         int n = -1 >>> Integer.numberOfLeadingZeros(c - 1);
//         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
//     }
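
//     // Editorial aside: both helpers above translate to plain D nearly
//     // verbatim, since D's >>> is Java's unsigned right shift. A standalone
//     // sketch, with Integer.numberOfLeadingZeros replaced by the classic
//     // bit-smear from the same Hacker's Delight section:
//     //
//     //     enum int HASH_BITS = 0x7fffffff;
//     //     enum int MAXIMUM_CAPACITY = 1 << 30;
//     //
//     //     static int spread(int h) {
//     //         return (h ^ (h >>> 16)) & HASH_BITS;
//     //     }
//     //
//     //     static int tableSizeFor(int c) {
//     //         int n = c - 1;
//     //         n |= n >>> 1;  n |= n >>> 2;  n |= n >>> 4;
//     //         n |= n >>> 8;  n |= n >>> 16;
//     //         return (n < 0) ? 1
//     //              : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
//     //     }
//     //     // tableSizeFor(0) == 1, tableSizeFor(16) == 16, tableSizeFor(17) == 32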

//     /**
//      * Returns x's Class if it is of the form "class C implements
//      * Comparable<C>", else null.
//      */
//     static Class<?> comparableClassFor(Object x) {
//         if (x instanceof Comparable) {
//             Class<?> c; Type[] ts, as; ParameterizedType p;
//             if ((c = x.getClass()) == String.class) // bypass checks
//                 return c;
//             if ((ts = c.getGenericInterfaces()) != null) {
//                 for (Type t : ts) {
//                     if ((t instanceof ParameterizedType) &&
//                         ((p = (ParameterizedType)t).getRawType() ==
//                          Comparable.class) &&
//                         (as = p.getActualTypeArguments()) != null &&
//                         as.length == 1 && as[0] == c) // type arg is c
//                         return c;
//                 }
//             }
//         }
//         return null;
//     }

//     /**
//      * Returns k.compareTo(x) if x matches kc (k's screened comparable
//      * class), else 0.
//      */
//     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
//     static int compareComparables(Class<?> kc, Object k, Object x) {
//         return (x == null || x.getClass() != kc ? 0 :
//                 ((Comparable)k).compareTo(x));
//     }

//     /* ---------------- Table element access -------------- */

//     /*
//      * Atomic access methods are used for table elements as well as
//      * elements of in-progress next table while resizing. All uses of
//      * the tab arguments must be null checked by callers. All callers
//      * also paranoically precheck that tab's length is not zero (or an
//      * equivalent check), thus ensuring that any index argument taking
//      * the form of a hash value anded with (length - 1) is a valid
//      * index. Note that, to be correct wrt arbitrary concurrency
//      * errors by users, these checks must operate on local variables,
//      * which accounts for some odd-looking inline assignments below.
//      * Note that calls to setTabAt always occur within locked regions,
//      * and so require only release ordering.
//      */

//     static final !(K, V) Node!(K, V) tabAt(Node!(K, V)[] tab, int i) {
//         return (Node!(K, V))U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
//     }

//     static final !(K, V) boolean casTabAt(Node!(K, V)[] tab, int i,
//                                           Node!(K, V) c, Node!(K, V) v) {
//         return U.compareAndSetObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
//     }

//     static final !(K, V) void setTabAt(Node!(K, V)[] tab, int i, Node!(K, V) v) {
//         U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
//     }

//     /* ---------------- Fields -------------- */

//     /**
//      * The array of bins. Lazily initialized upon first insertion.
//      * Size is always a power of two. Accessed directly by iterators.
//      */
//     transient volatile Node!(K, V)[] table;

//     /**
//      * The next table to use; non-null only while resizing.
//      */
//     private transient volatile Node!(K, V)[] nextTable;

//     /**
//      * Base counter value, used mainly when there is no contention,
//      * but also as a fallback during table initialization
//      * races. Updated via CAS.
//      */
//     private transient volatile long baseCount;

//     /**
//      * Table initialization and resizing control. When negative, the
//      * table is being initialized or resized: -1 for initialization,
//      * else -(1 + the number of active resizing threads). Otherwise,
//      * when table is null, holds the initial table size to use upon
//      * creation, or 0 for default. After initialization, holds the
//      * next element count value upon which to resize the table.
//      */
//     private transient volatile int sizeCtl;
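
//     // Editorial aside: the sizeCtl states documented above, restated as a
//     // tiny standalone D helper (illustrative only; the real field is read
//     // and written with atomics, never decoded like this):
//     //
//     //     static string describeSizeCtl(int sc, bool tableNull) {
//     //         if (sc == -1) return "initializing";
//     //         if (sc < -1)  return "resizing: -(1 + active resizer threads)";
//     //         if (tableNull) return sc == 0 ? "will use default capacity"
//     //                                       : "initial table size on creation";
//     //         return "element count that triggers the next resize";
//     //     }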

//     /**
//      * The next table index (plus one) to split while resizing.
//      */
//     private transient volatile int transferIndex;

//     /**
//      * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
//      */
//     private transient volatile int cellsBusy;

//     /**
//      * Table of counter cells. When non-null, size is a power of 2.
//      */
//     private transient volatile CounterCell[] counterCells;

//     // views
//     private transient KeySetView!(K, V) keySet;
//     private transient ValuesView!(K, V) values;
//     private transient EntrySetView!(K, V) entrySet;

//     /* ---------------- Public operations -------------- */

//     /**
//      * Creates a new, empty map with the default initial table size (16).
//      */
//     public ConcurrentHashMap() {
//     }

//     /**
//      * Creates a new, empty map with an initial table size
//      * accommodating the specified number of elements without the need
//      * to dynamically resize.
//      *
//      * @param initialCapacity The implementation performs internal
//      *        sizing to accommodate this many elements.
//      * @throws IllegalArgumentException if the initial capacity of
//      *         elements is negative
//      */
//     public ConcurrentHashMap(int initialCapacity) {
//         this(initialCapacity, LOAD_FACTOR, 1);
//     }

//     /**
//      * Creates a new map with the same mappings as the given map.
//      *
//      * @param m the map
//      */
//     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
//         this.sizeCtl = DEFAULT_CAPACITY;
//         putAll(m);
//     }

//     /**
//      * Creates a new, empty map with an initial table size based on
//      * the given number of elements ({@code initialCapacity}) and
//      * initial table density ({@code loadFactor}).
//      *
//      * @param initialCapacity the initial capacity. The implementation
//      *        performs internal sizing to accommodate this many elements,
//      *        given the specified load factor.
//      * @param loadFactor the load factor (table density) for
//      *        establishing the initial table size
//      * @throws IllegalArgumentException if the initial capacity of
//      *         elements is negative or the load factor is nonpositive
//      */
//     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
//         this(initialCapacity, loadFactor, 1);
//     }

//     /**
//      * Creates a new, empty map with an initial table size based on
//      * the given number of elements ({@code initialCapacity}), initial
//      * table density ({@code loadFactor}), and number of concurrently
//      * updating threads ({@code concurrencyLevel}).
//      *
//      * @param initialCapacity the initial capacity. The implementation
//      *        performs internal sizing to accommodate this many elements,
//      *        given the specified load factor.
//      * @param loadFactor the load factor (table density) for
//      *        establishing the initial table size
//      * @param concurrencyLevel the estimated number of concurrently
//      *        updating threads. The implementation may use this value as
//      *        a sizing hint.
//      * @throws IllegalArgumentException if the initial capacity is
//      *         negative or the load factor or concurrencyLevel are
//      *         nonpositive
//      */
//     public ConcurrentHashMap(int initialCapacity,
//                              float loadFactor, int concurrencyLevel) {
//         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
//             throw new IllegalArgumentException();
//         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
//             initialCapacity = concurrencyLevel;   // as estimated threads
//         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
//         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
//             MAXIMUM_CAPACITY : tableSizeFor((int)size);
//         this.sizeCtl = cap;
//     }
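
//     // Editorial aside: a worked example of the sizing arithmetic above,
//     // with hypothetical arguments. For initialCapacity = 22 and
//     // loadFactor = 0.75f:
//     //
//     //     size = (long)(1.0 + 22 / 0.75)   // == 30
//     //     cap  = tableSizeFor(30)          // == 32
//     //
//     // so sizeCtl is seeded to allocate a 32-bin table on first insertion.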

//     // Original (since JDK1.2) Map methods

//     /**
//      * {@inheritDoc}
//      */
//     public int size() {
//         long n = sumCount();
//         return ((n < 0L) ? 0 :
//                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
//                 (int)n);
//     }

//     /**
//      * {@inheritDoc}
//      */
//     public boolean isEmpty() {
//         return sumCount() <= 0L; // ignore transient negative values
//     }

//     /**
//      * Returns the value to which the specified key is mapped,
//      * or {@code null} if this map contains no mapping for the key.
//      *
//      * <p>More formally, if this map contains a mapping from a key
//      * {@code k} to a value {@code v} such that {@code key.equals(k)},
//      * then this method returns {@code v}; otherwise it returns
//      * {@code null}. (There can be at most one such mapping.)
//      *
//      * @throws NullPointerException if the specified key is null
//      */
//     public V get(Object key) {
//         Node!(K, V)[] tab; Node!(K, V) e, p; int n, eh; K ek;
//         int h = spread(key.hashCode());
//         if ((tab = table) != null && (n = tab.length) > 0 &&
//             (e = tabAt(tab, (n - 1) & h)) != null) {
//             if ((eh = e.hash) == h) {
//                 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
//                     return e.val;
//             }
//             else if (eh < 0)
//                 return (p = e.find(h, key)) != null ? p.val : null;
//             while ((e = e.next) != null) {
//                 if (e.hash == h &&
//                     ((ek = e.key) == key || (ek != null && key.equals(ek))))
//                     return e.val;
//             }
//         }
//         return null;
//     }

//     /**
//      * Tests if the specified object is a key in this table.
//      *
//      * @param key possible key
//      * @return {@code true} if and only if the specified object
//      *         is a key in this table, as determined by the
//      *         {@code equals} method; {@code false} otherwise
//      * @throws NullPointerException if the specified key is null
//      */
//     public boolean containsKey(Object key) {
//         return get(key) != null;
//     }

//     /**
//      * Returns {@code true} if this map maps one or more keys to the
//      * specified value. Note: This method may require a full traversal
//      * of the map, and is much slower than method {@code containsKey}.
//      *
//      * @param value value whose presence in this map is to be tested
//      * @return {@code true} if this map maps one or more keys to the
//      *         specified value
//      * @throws NullPointerException if the specified value is null
//      */
//     public boolean containsValue(Object value) {
//         if (value == null)
//             throw new NullPointerException();
//         Node!(K, V)[] t;
//         if ((t = table) != null) {
//             Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//             for (Node!(K, V) p; (p = it.advance()) != null; ) {
//                 V v;
//                 if ((v = p.val) == value || (v != null && value.equals(v)))
//                     return true;
//             }
//         }
//         return false;
//     }

//     /**
//      * Maps the specified key to the specified value in this table.
//      * Neither the key nor the value can be null.
//      *
//      * <p>The value can be retrieved by calling the {@code get} method
//      * with a key that is equal to the original key.
//      *
//      * @param key key with which the specified value is to be associated
//      * @param value value to be associated with the specified key
//      * @return the previous value associated with {@code key}, or
//      *         {@code null} if there was no mapping for {@code key}
//      * @throws NullPointerException if the specified key or value is null
//      */
//     public V put(K key, V value) {
//         return putVal(key, value, false);
//     }

//     /** Implementation for put and putIfAbsent */
//     final V putVal(K key, V value, boolean onlyIfAbsent) {
//         if (key == null || value == null) throw new NullPointerException();
//         int hash = spread(key.hashCode());
//         int binCount = 0;
//         for (Node!(K, V)[] tab = table;;) {
//             Node!(K, V) f; int n, i, fh; K fk; V fv;
//             if (tab == null || (n = tab.length) == 0)
//                 tab = initTable();
//             else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
//                 if (casTabAt(tab, i, null, new Node!(K, V)(hash, key, value)))
//                     break;                   // no lock when adding to empty bin
//             }
//             else if ((fh = f.hash) == MOVED)
//                 tab = helpTransfer(tab, f);
//             else if (onlyIfAbsent // check first node without acquiring lock
//                      && fh == hash
//                      && ((fk = f.key) == key || (fk != null && key.equals(fk)))
//                      && (fv = f.val) != null)
//                 return fv;
//             else {
//                 V oldVal = null;
//                 synchronized (f) {
//                     if (tabAt(tab, i) == f) {
//                         if (fh >= 0) {
//                             binCount = 1;
//                             for (Node!(K, V) e = f;; ++binCount) {
//                                 K ek;
//                                 if (e.hash == hash &&
//                                     ((ek = e.key) == key ||
//                                      (ek != null && key.equals(ek)))) {
//                                     oldVal = e.val;
//                                     if (!onlyIfAbsent)
//                                         e.val = value;
//                                     break;
//                                 }
//                                 Node!(K, V) pred = e;
//                                 if ((e = e.next) == null) {
//                                     pred.next = new Node!(K, V)(hash, key, value);
//                                     break;
//                                 }
//                             }
//                         }
//                         else if (f instanceof TreeBin) {
//                             Node!(K, V) p;
//                             binCount = 2;
//                             if ((p = ((TreeBin!(K, V))f).putTreeVal(hash, key,
//                                                                     value)) != null) {
//                                 oldVal = p.val;
//                                 if (!onlyIfAbsent)
//                                     p.val = value;
//                             }
//                         }
//                         else if (f instanceof ReservationNode)
//                             throw new IllegalStateException("Recursive update");
//                     }
//                 }
//                 if (binCount != 0) {
//                     if (binCount >= TREEIFY_THRESHOLD)
//                         treeifyBin(tab, i);
//                     if (oldVal != null)
//                         return oldVal;
//                     break;
//                 }
//             }
//         }
//         addCount(1L, binCount);
//         return null;
//     }

//     /**
//      * Copies all of the mappings from the specified map to this one.
//      * These mappings replace any mappings that this map had for any of the
//      * keys currently in the specified map.
//      *
//      * @param m mappings to be stored in this map
//      */
//     public void putAll(Map<? extends K, ? extends V> m) {
//         tryPresize(m.size());
//         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
//             putVal(e.getKey(), e.getValue(), false);
//     }

//     /**
//      * Removes the key (and its corresponding value) from this map.
//      * This method does nothing if the key is not in the map.
//      *
//      * @param key the key that needs to be removed
//      * @return the previous value associated with {@code key}, or
//      *         {@code null} if there was no mapping for {@code key}
//      * @throws NullPointerException if the specified key is null
//      */
//     public V remove(Object key) {
//         return replaceNode(key, null, null);
//     }

//     /**
//      * Implementation for the four public remove/replace methods:
//      * Replaces node value with v, conditional upon match of cv if
//      * non-null. If resulting value is null, delete.
//      */
//     final V replaceNode(Object key, V value, Object cv) {
//         int hash = spread(key.hashCode());
//         for (Node!(K, V)[] tab = table;;) {
//             Node!(K, V) f; int n, i, fh;
//             if (tab == null || (n = tab.length) == 0 ||
//                 (f = tabAt(tab, i = (n - 1) & hash)) == null)
//                 break;
//             else if ((fh = f.hash) == MOVED)
//                 tab = helpTransfer(tab, f);
//             else {
//                 V oldVal = null;
//                 boolean validated = false;
//                 synchronized (f) {
//                     if (tabAt(tab, i) == f) {
//                         if (fh >= 0) {
//                             validated = true;
//                             for (Node!(K, V) e = f, pred = null;;) {
//                                 K ek;
//                                 if (e.hash == hash &&
//                                     ((ek = e.key) == key ||
//                                      (ek != null && key.equals(ek)))) {
//                                     V ev = e.val;
//                                     if (cv == null || cv == ev ||
//                                         (ev != null && cv.equals(ev))) {
//                                         oldVal = ev;
//                                         if (value != null)
//                                             e.val = value;
//                                         else if (pred != null)
//                                             pred.next = e.next;
//                                         else
//                                             setTabAt(tab, i, e.next);
//                                     }
//                                     break;
//                                 }
//                                 pred = e;
//                                 if ((e = e.next) == null)
//                                     break;
//                             }
//                         }
//                         else if (f instanceof TreeBin) {
//                             validated = true;
//                             TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                             TreeNode!(K, V) r, p;
//                             if ((r = t.root) != null &&
//                                 (p = r.findTreeNode(hash, key, null)) != null) {
//                                 V pv = p.val;
//                                 if (cv == null || cv == pv ||
//                                     (pv != null && cv.equals(pv))) {
//                                     oldVal = pv;
//                                     if (value != null)
//                                         p.val = value;
//                                     else if (t.removeTreeNode(p))
//                                         setTabAt(tab, i, untreeify(t.first));
//                                 }
//                             }
//                         }
//                         else if (f instanceof ReservationNode)
//                             throw new IllegalStateException("Recursive update");
//                     }
//                 }
//                 if (validated) {
//                     if (oldVal != null) {
//                         if (value == null)
//                             addCount(-1L, -1);
//                         return oldVal;
//                     }
//                     break;
//                 }
//             }
//         }
//         return null;
//     }
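
//     // Editorial aside: how the four public remove/replace operations in
//     // this file map onto replaceNode(key, value, cv) above (value == null
//     // means delete; cv non-null means "only if the current value matches"):
//     //
//     //     remove(key)             -> replaceNode(key, null,     null)
//     //     remove(key, value)      -> replaceNode(key, null,     value)
//     //     replace(key, old, new)  -> replaceNode(key, newValue, oldValue)
//     //     replace(key, value)     -> replaceNode(key, value,    null)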

//     /**
//      * Removes all of the mappings from this map.
//      */
//     public void clear() {
//         long delta = 0L; // negative number of deletions
//         int i = 0;
//         Node!(K, V)[] tab = table;
//         while (tab != null && i < tab.length) {
//             int fh;
//             Node!(K, V) f = tabAt(tab, i);
//             if (f == null)
//                 ++i;
//             else if ((fh = f.hash) == MOVED) {
//                 tab = helpTransfer(tab, f);
//                 i = 0; // restart
//             }
//             else {
//                 synchronized (f) {
//                     if (tabAt(tab, i) == f) {
//                         Node!(K, V) p = (fh >= 0 ? f :
//                                          (f instanceof TreeBin) ?
//                                          ((TreeBin!(K, V))f).first : null);
//                         while (p != null) {
//                             --delta;
//                             p = p.next;
//                         }
//                         setTabAt(tab, i++, null);
//                     }
//                 }
//             }
//         }
//         if (delta != 0L)
//             addCount(delta, -1);
//     }

//     /**
//      * Returns a {@link Set} view of the keys contained in this map.
//      * The set is backed by the map, so changes to the map are
//      * reflected in the set, and vice-versa. The set supports element
//      * removal, which removes the corresponding mapping from this map,
//      * via the {@code Iterator.remove}, {@code Set.remove},
//      * {@code removeAll}, {@code retainAll}, and {@code clear}
//      * operations. It does not support the {@code add} or
//      * {@code addAll} operations.
//      *
//      * <p>The view's iterators and spliterators are
//      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
//      *
//      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
//      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
//      *
//      * @return the set view
//      */
//     public KeySetView!(K, V) keySet() {
//         KeySetView!(K, V) ks;
//         if ((ks = keySet) != null) return ks;
//         return keySet = new KeySetView!(K, V)(this, null);
//     }

//     /**
//      * Returns a {@link Collection} view of the values contained in this map.
//      * The collection is backed by the map, so changes to the map are
//      * reflected in the collection, and vice-versa. The collection
//      * supports element removal, which removes the corresponding
//      * mapping from this map, via the {@code Iterator.remove},
//      * {@code Collection.remove}, {@code removeAll},
//      * {@code retainAll}, and {@code clear} operations. It does not
//      * support the {@code add} or {@code addAll} operations.
//      *
//      * <p>The view's iterators and spliterators are
//      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
//      *
//      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
//      * and {@link Spliterator#NONNULL}.
//      *
//      * @return the collection view
//      */
//     public Collection<V> values() {
//         ValuesView!(K, V) vs;
//         if ((vs = values) != null) return vs;
//         return values = new ValuesView!(K, V)(this);
//     }

//     /**
//      * Returns a {@link Set} view of the mappings contained in this map.
//      * The set is backed by the map, so changes to the map are
//      * reflected in the set, and vice-versa. The set supports element
//      * removal, which removes the corresponding mapping from the map,
//      * via the {@code Iterator.remove}, {@code Set.remove},
//      * {@code removeAll}, {@code retainAll}, and {@code clear}
//      * operations.
//      *
//      * <p>The view's iterators and spliterators are
//      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
//      *
//      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
//      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
//      *
//      * @return the set view
//      */
//     public Set<Map.Entry!(K, V)> entrySet() {
//         EntrySetView!(K, V) es;
//         if ((es = entrySet) != null) return es;
//         return entrySet = new EntrySetView!(K, V)(this);
//     }
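
//     // Editorial aside: a hedged usage sketch of the view contract
//     // described above (it assumes the commented-out views were enabled;
//     // removal through a view removes the mapping from the map itself):
//     //
//     //     auto ks = map.keySet();
//     //     ks.remove("stale");                  // mutates the backing map
//     //     assert(!map.containsKey("stale"));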

//     /**
//      * Returns the hash code value for this {@link Map}, i.e.,
//      * the sum of, for each key-value pair in the map,
//      * {@code key.hashCode() ^ value.hashCode()}.
//      *
//      * @return the hash code value for this map
//      */
//     public int hashCode() {
//         int h = 0;
//         Node!(K, V)[] t;
//         if ((t = table) != null) {
//             Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//             for (Node!(K, V) p; (p = it.advance()) != null; )
//                 h += p.key.hashCode() ^ p.val.hashCode();
//         }
//         return h;
//     }

//     /**
//      * Returns a string representation of this map. The string
//      * representation consists of a list of key-value mappings (in no
//      * particular order) enclosed in braces ("{@code {}}"). Adjacent
//      * mappings are separated by the characters {@code ", "} (comma
//      * and space). Each key-value mapping is rendered as the key
//      * followed by an equals sign ("{@code =}") followed by the
//      * associated value.
//      *
//      * @return a string representation of this map
//      */
//     public String toString() {
//         Node!(K, V)[] t;
//         int f = (t = table) == null ? 0 : t.length;
//         Traverser!(K, V) it = new Traverser!(K, V)(t, f, 0, f);
//         StringBuilder sb = new StringBuilder();
//         sb.append('{');
//         Node!(K, V) p;
//         if ((p = it.advance()) != null) {
//             for (;;) {
//                 K k = p.key;
//                 V v = p.val;
//                 sb.append(k == this ? "(this Map)" : k);
//                 sb.append('=');
//                 sb.append(v == this ? "(this Map)" : v);
//                 if ((p = it.advance()) == null)
//                     break;
//                 sb.append(',').append(' ');
//             }
//         }
//         return sb.append('}').toString();
//     }

//     /**
//      * Compares the specified object with this map for equality.
//      * Returns {@code true} if the given object is a map with the same
//      * mappings as this map. This operation may return misleading
//      * results if either map is concurrently modified during execution
//      * of this method.
//      *
//      * @param o object to be compared for equality with this map
//      * @return {@code true} if the specified object is equal to this map
//      */
//     public boolean equals(Object o) {
//         if (o != this) {
//             if (!(o instanceof Map))
//                 return false;
//             Map<?,?> m = (Map<?,?>) o;
//             Node!(K, V)[] t;
//             int f = (t = table) == null ? 0 : t.length;
// /**
// * Compares the specified object with this map for equality.
// * Returns {@code true} if the given object is a map with the same
// * mappings as this map. This operation may return misleading
// * results if either map is concurrently modified during execution
// * of this method.
// *
// * @param o object to be compared for equality with this map
// * @return {@code true} if the specified object is equal to this map
// */
// public boolean equals(Object o) {
//     if (o != this) {
//         if (!(o instanceof Map))
//             return false;
//         Map<?,?> m = (Map<?,?>) o;
//         Node!(K, V)[] t;
//         int f = (t = table) == null ? 0 : t.length;
//         Traverser!(K, V) it = new Traverser!(K, V)(t, f, 0, f);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             V val = p.val;
//             Object v = m.get(p.key);
//             if (v == null || (v != val && !v.equals(val)))
//                 return false;
//         }
//         for (Map.Entry<?,?> e : m.entrySet()) {
//             Object mk, mv, v;
//             if ((mk = e.getKey()) == null ||
//                 (mv = e.getValue()) == null ||
//                 (v = get(mk)) == null ||
//                 (mv != v && !mv.equals(v)))
//                 return false;
//         }
//     }
//     return true;
// }

// /**
// * Stripped-down version of helper class used in previous version,
// * declared for the sake of serialization compatibility.
// */
// static class Segment!(K, V) extends ReentrantLock implements Serializable {
//     private static final long serialVersionUID = 2249069246763182397L;
//     final float loadFactor;
//     Segment(float lf) { this.loadFactor = lf; }
// }

// /**
// * Saves this map to a stream (that is, serializes it).
// *
// * @param s the stream
// * @throws java.io.IOException if an I/O error occurs
// * @serialData
// * the serialized fields, followed by the key (Object) and value
// * (Object) for each key-value mapping, followed by a null pair.
// * The key-value mappings are emitted in no particular order.
// */
// private void writeObject(java.io.ObjectOutputStream s)
//     throws java.io.IOException {
//     // For serialization compatibility
//     // Emulate segment calculation from previous version of this class
//     int sshift = 0;
//     int ssize = 1;
//     while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
//         ++sshift;
//         ssize <<= 1;
//     }
//     int segmentShift = 32 - sshift;
//     int segmentMask = ssize - 1;
//
//     Segment!(K, V)[] segments = (Segment!(K, V)[])
//         new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
//     for (int i = 0; i < segments.length; ++i)
//         segments[i] = new Segment!(K, V)(LOAD_FACTOR);
//     java.io.ObjectOutputStream.PutField streamFields = s.putFields();
//     streamFields.put("segments", segments);
//     streamFields.put("segmentShift", segmentShift);
//     streamFields.put("segmentMask", segmentMask);
//     s.writeFields();
//
//     Node!(K, V)[] t;
//     if ((t = table) != null) {
//         Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             s.writeObject(p.key);
//             s.writeObject(p.val);
//         }
//     }
//     s.writeObject(null);
//     s.writeObject(null);
// }

// /**
// * Reconstitutes this map from a stream (that is, deserializes it).
// * @param s the stream
// * @throws ClassNotFoundException if the class of a serialized object
// *         could not be found
// * @throws java.io.IOException if an I/O error occurs
// */
// private void readObject(java.io.ObjectInputStream s)
//     throws java.io.IOException, ClassNotFoundException {
//     /*
//      * To improve performance in typical cases, we create nodes
//      * while reading, then place in table once size is known.
//      * However, we must also validate uniqueness and deal with
//      * overpopulated bins while doing so, which requires
//      * specialized versions of putVal mechanics.
//      */
//     sizeCtl = -1; // force exclusion for table construction
//     s.defaultReadObject();
//     long size = 0L;
//     Node!(K, V) p = null;
//     for (;;) {
//         K k = (K) s.readObject();
//         V v = (V) s.readObject();
//         if (k != null && v != null) {
//             p = new Node!(K, V)(spread(k.hashCode()), k, v, p);
//             ++size;
//         }
//         else
//             break;
//     }
//     if (size == 0L)
//         sizeCtl = 0;
//     else {
//         long ts = (long)(1.0 + size / LOAD_FACTOR);
//         int n = (ts >= (long)MAXIMUM_CAPACITY) ?
//             MAXIMUM_CAPACITY : tableSizeFor((int)ts);
//         Node!(K, V)[] tab = (Node!(K, V)[])new Node<?,?>[n];
//         int mask = n - 1;
//         long added = 0L;
//         while (p != null) {
//             boolean insertAtFront;
//             Node!(K, V) next = p.next, first;
//             int h = p.hash, j = h & mask;
//             if ((first = tabAt(tab, j)) == null)
//                 insertAtFront = true;
//             else {
//                 K k = p.key;
//                 if (first.hash < 0) {
//                     TreeBin!(K, V) t = (TreeBin!(K, V))first;
//                     if (t.putTreeVal(h, k, p.val) == null)
//                         ++added;
//                     insertAtFront = false;
//                 }
//                 else {
//                     int binCount = 0;
//                     insertAtFront = true;
//                     Node!(K, V) q; K qk;
//                     for (q = first; q != null; q = q.next) {
//                         if (q.hash == h &&
//                             ((qk = q.key) == k ||
//                              (qk != null && k.equals(qk)))) {
//                             insertAtFront = false;
//                             break;
//                         }
//                         ++binCount;
//                     }
//                     if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
//                         insertAtFront = false;
//                         ++added;
//                         p.next = first;
//                         TreeNode!(K, V) hd = null, tl = null;
//                         for (q = p; q != null; q = q.next) {
//                             TreeNode!(K, V) t = new TreeNode!(K, V)
//                                 (q.hash, q.key, q.val, null, null);
//                             if ((t.prev = tl) == null)
//                                 hd = t;
//                             else
//                                 tl.next = t;
//                             tl = t;
//                         }
//                         setTabAt(tab, j, new TreeBin!(K, V)(hd));
//                     }
//                 }
//             }
//             if (insertAtFront) {
//                 ++added;
//                 p.next = first;
//                 setTabAt(tab, j, p);
//             }
//             p = next;
//         }
//         table = tab;
//         sizeCtl = n - (n >>> 2);
//         baseCount = added;
//     }
// }

// // ConcurrentMap methods

// /**
// * {@inheritDoc}
// *
// * @return the previous value associated with the specified key,
// *         or {@code null} if there was no mapping for the key
// * @throws NullPointerException if the specified key or value is null
// */
// public V putIfAbsent(K key, V value) {
//     return putVal(key, value, true);
// }

// /**
// * {@inheritDoc}
// *
// * @throws NullPointerException if the specified key is null
// */
// public boolean remove(Object key, Object value) {
//     if (key == null)
//         throw new NullPointerException();
//     return value != null && replaceNode(key, null, value) != null;
// }

// /**
// * {@inheritDoc}
// *
// * @throws NullPointerException if any of the arguments are null
// */
// public boolean replace(K key, V oldValue, V newValue) {
//     if (key == null || oldValue == null || newValue == null)
//         throw new NullPointerException();
//     return replaceNode(key, newValue, oldValue) != null;
// }
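// A minimal sketch of the atomic ConcurrentMap operations above (values are
// illustrative; assumes the port keeps the JDK semantics):
//
//     auto m = new ConcurrentHashMap!(string, int)();
//     m.putIfAbsent("hits", 0);   // only the first concurrent writer wins
//     m.replace("hits", 0, 1);    // CAS-style: succeeds only while value == 0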
// /**
// * {@inheritDoc}
// *
// * @return the previous value associated with the specified key,
// *         or {@code null} if there was no mapping for the key
// * @throws NullPointerException if the specified key or value is null
// */
// public V replace(K key, V value) {
//     if (key == null || value == null)
//         throw new NullPointerException();
//     return replaceNode(key, value, null);
// }

// // Overrides of JDK8+ Map extension method defaults

// /**
// * Returns the value to which the specified key is mapped, or the
// * given default value if this map contains no mapping for the
// * key.
// *
// * @param key the key whose associated value is to be returned
// * @param defaultValue the value to return if this map contains
// *        no mapping for the given key
// * @return the mapping for the key, if present; else the default value
// * @throws NullPointerException if the specified key is null
// */
// public V getOrDefault(Object key, V defaultValue) {
//     V v;
//     return (v = get(key)) == null ? defaultValue : v;
// }

// public void forEach(BiConsumer<? super K, ? super V> action) {
//     if (action == null) throw new NullPointerException();
//     Node!(K, V)[] t;
//     if ((t = table) != null) {
//         Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             action.accept(p.key, p.val);
//         }
//     }
// }

// public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
//     if (function == null) throw new NullPointerException();
//     Node!(K, V)[] t;
//     if ((t = table) != null) {
//         Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             V oldValue = p.val;
//             for (K key = p.key;;) {
//                 V newValue = function.apply(key, oldValue);
//                 if (newValue == null)
//                     throw new NullPointerException();
//                 if (replaceNode(key, newValue, oldValue) != null ||
//                     (oldValue = get(key)) == null)
//                     break;
//             }
//         }
//     }
// }
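// Usage sketch for the bulk defaults above (illustrative only): replaceAll
// retries replaceNode until its result is installed or the mapping vanishes,
// so the function may run more than once per key and should be side-effect
// free. For example:
//
//     m.replaceAll((k, v) => v + 1);              // bump every value
//     m.forEach((k, v) => writeln(k, "=", v));    // weakly consistent scan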
// /**
// * Helper method for EntrySetView.removeIf.
// */
// boolean removeEntryIf(Predicate<? super Entry!(K, V)> function) {
//     if (function == null) throw new NullPointerException();
//     Node!(K, V)[] t;
//     boolean removed = false;
//     if ((t = table) != null) {
//         Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             K k = p.key;
//             V v = p.val;
//             Map.Entry!(K, V) e = new AbstractMap.SimpleImmutableEntry<>(k, v);
//             if (function.test(e) && replaceNode(k, null, v) != null)
//                 removed = true;
//         }
//     }
//     return removed;
// }

// /**
// * Helper method for ValuesView.removeIf.
// */
// boolean removeValueIf(Predicate<? super V> function) {
//     if (function == null) throw new NullPointerException();
//     Node!(K, V)[] t;
//     boolean removed = false;
//     if ((t = table) != null) {
//         Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//         for (Node!(K, V) p; (p = it.advance()) != null; ) {
//             K k = p.key;
//             V v = p.val;
//             if (function.test(v) && replaceNode(k, null, v) != null)
//                 removed = true;
//         }
//     }
//     return removed;
// }

// /**
// * If the specified key is not already associated with a value,
// * attempts to compute its value using the given mapping function
// * and enters it into this map unless {@code null}. The entire
// * method invocation is performed atomically, so the function is
// * applied at most once per key. Some attempted update operations
// * on this map by other threads may be blocked while computation
// * is in progress, so the computation should be short and simple,
// * and must not attempt to update any other mappings of this map.
// *
// * @param key key with which the specified value is to be associated
// * @param mappingFunction the function to compute a value
// * @return the current (existing or computed) value associated with
// *         the specified key, or null if the computed value is null
// * @throws NullPointerException if the specified key or mappingFunction
// *         is null
// * @throws IllegalStateException if the computation detectably
// *         attempts a recursive update to this map that would
// *         otherwise never complete
// * @throws RuntimeException or Error if the mappingFunction does so,
// *         in which case the mapping is left unestablished
// */
// public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
//     if (key == null || mappingFunction == null)
//         throw new NullPointerException();
//     int h = spread(key.hashCode());
//     V val = null;
//     int binCount = 0;
//     for (Node!(K, V)[] tab = table;;) {
//         Node!(K, V) f; int n, i, fh; K fk; V fv;
//         if (tab == null || (n = tab.length) == 0)
//             tab = initTable();
//         else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
//             Node!(K, V) r = new ReservationNode!(K, V)();
//             synchronized (r) {
//                 if (casTabAt(tab, i, null, r)) {
//                     binCount = 1;
//                     Node!(K, V) node = null;
//                     try {
//                         if ((val = mappingFunction.apply(key)) != null)
//                             node = new Node!(K, V)(h, key, val);
//                     } finally {
//                         setTabAt(tab, i, node);
//                     }
//                 }
//             }
//             if (binCount != 0)
//                 break;
//         }
//         else if ((fh = f.hash) == MOVED)
//             tab = helpTransfer(tab, f);
//         else if (fh == h // check first node without acquiring lock
//                  && ((fk = f.key) == key || (fk != null && key.equals(fk)))
//                  && (fv = f.val) != null)
//             return fv;
//         else {
//             boolean added = false;
//             synchronized (f) {
//                 if (tabAt(tab, i) == f) {
//                     if (fh >= 0) {
//                         binCount = 1;
//                         for (Node!(K, V) e = f;; ++binCount) {
//                             K ek;
//                             if (e.hash == h &&
//                                 ((ek = e.key) == key ||
//                                  (ek != null && key.equals(ek)))) {
//                                 val = e.val;
//                                 break;
//                             }
//                             Node!(K, V) pred = e;
//                             if ((e = e.next) == null) {
//                                 if ((val = mappingFunction.apply(key)) != null) {
//                                     if (pred.next != null)
//                                         throw new IllegalStateException("Recursive update");
//                                     added = true;
//                                     pred.next = new Node!(K, V)(h, key, val);
//                                 }
//                                 break;
//                             }
//                         }
//                     }
//                     else if (f instanceof TreeBin) {
//                         binCount = 2;
//                         TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                         TreeNode!(K, V) r, p;
//                         if ((r = t.root) != null &&
//                             (p = r.findTreeNode(h, key, null)) != null)
//                             val = p.val;
//                         else if ((val = mappingFunction.apply(key)) != null) {
//                             added = true;
//                             t.putTreeVal(h, key, val);
//                         }
//                     }
//                     else if (f instanceof ReservationNode)
//                         throw new IllegalStateException("Recursive update");
//                 }
//             }
//             if (binCount != 0) {
//                 if (binCount >= TREEIFY_THRESHOLD)
//                     treeifyBin(tab, i);
//                 if (!added)
//                     return val;
//                 break;
//             }
//         }
//     }
//     if (val != null)
//         addCount(1L, binCount);
//     return val;
// }
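// Minimal computeIfAbsent sketch (assumes JDK-equivalent semantics once the
// port is enabled; expensiveLoad is a hypothetical helper): the mapping
// function runs at most once per key, under the bin lock, so keep it short
// and never touch other keys of this map from inside it:
//
//     auto cache = new ConcurrentHashMap!(string, Object)();
//     Object v = cache.computeIfAbsent("key", k => expensiveLoad(k));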
// /**
// * If the value for the specified key is present, attempts to
// * compute a new mapping given the key and its current mapped
// * value. The entire method invocation is performed atomically.
// * Some attempted update operations on this map by other threads
// * may be blocked while computation is in progress, so the
// * computation should be short and simple, and must not attempt to
// * update any other mappings of this map.
// *
// * @param key key with which a value may be associated
// * @param remappingFunction the function to compute a value
// * @return the new value associated with the specified key, or null if none
// * @throws NullPointerException if the specified key or remappingFunction
// *         is null
// * @throws IllegalStateException if the computation detectably
// *         attempts a recursive update to this map that would
// *         otherwise never complete
// * @throws RuntimeException or Error if the remappingFunction does so,
// *         in which case the mapping is unchanged
// */
// public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
//     if (key == null || remappingFunction == null)
//         throw new NullPointerException();
//     int h = spread(key.hashCode());
//     V val = null;
//     int delta = 0;
//     int binCount = 0;
//     for (Node!(K, V)[] tab = table;;) {
//         Node!(K, V) f; int n, i, fh;
//         if (tab == null || (n = tab.length) == 0)
//             tab = initTable();
//         else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
//             break;
//         else if ((fh = f.hash) == MOVED)
//             tab = helpTransfer(tab, f);
//         else {
//             synchronized (f) {
//                 if (tabAt(tab, i) == f) {
//                     if (fh >= 0) {
//                         binCount = 1;
//                         for (Node!(K, V) e = f, pred = null;; ++binCount) {
//                             K ek;
//                             if (e.hash == h &&
//                                 ((ek = e.key) == key ||
//                                  (ek != null && key.equals(ek)))) {
//                                 val = remappingFunction.apply(key, e.val);
//                                 if (val != null)
//                                     e.val = val;
//                                 else {
//                                     delta = -1;
//                                     Node!(K, V) en = e.next;
//                                     if (pred != null)
//                                         pred.next = en;
//                                     else
//                                         setTabAt(tab, i, en);
//                                 }
//                                 break;
//                             }
//                             pred = e;
//                             if ((e = e.next) == null)
//                                 break;
//                         }
//                     }
//                     else if (f instanceof TreeBin) {
//                         binCount = 2;
//                         TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                         TreeNode!(K, V) r, p;
//                         if ((r = t.root) != null &&
//                             (p = r.findTreeNode(h, key, null)) != null) {
//                             val = remappingFunction.apply(key, p.val);
//                             if (val != null)
//                                 p.val = val;
//                             else {
//                                 delta = -1;
//                                 if (t.removeTreeNode(p))
//                                     setTabAt(tab, i, untreeify(t.first));
//                             }
//                         }
//                     }
//                     else if (f instanceof ReservationNode)
//                         throw new IllegalStateException("Recursive update");
//                 }
//             }
//             if (binCount != 0)
//                 break;
//         }
//     }
//     if (delta != 0)
//         addCount((long)delta, binCount);
//     return val;
// }
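// computeIfPresent sketch (illustrative; shouldDrop/decrement are
// hypothetical helpers): returning null from the remapping function removes
// the entry atomically, which makes decrement-and-remove idioms safe:
//
//     counts.computeIfPresent("k", (k, v) => shouldDrop(v) ? null : decrement(v));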
// /**
// * Attempts to compute a mapping for the specified key and its
// * current mapped value (or {@code null} if there is no current
// * mapping). The entire method invocation is performed atomically.
// * Some attempted update operations on this map by other threads
// * may be blocked while computation is in progress, so the
// * computation should be short and simple, and must not attempt to
// * update any other mappings of this Map.
// *
// * @param key key with which the specified value is to be associated
// * @param remappingFunction the function to compute a value
// * @return the new value associated with the specified key, or null if none
// * @throws NullPointerException if the specified key or remappingFunction
// *         is null
// * @throws IllegalStateException if the computation detectably
// *         attempts a recursive update to this map that would
// *         otherwise never complete
// * @throws RuntimeException or Error if the remappingFunction does so,
// *         in which case the mapping is unchanged
// */
// public V compute(K key,
//                  BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
//     if (key == null || remappingFunction == null)
//         throw new NullPointerException();
//     int h = spread(key.hashCode());
//     V val = null;
//     int delta = 0;
//     int binCount = 0;
//     for (Node!(K, V)[] tab = table;;) {
//         Node!(K, V) f; int n, i, fh;
//         if (tab == null || (n = tab.length) == 0)
//             tab = initTable();
//         else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
//             Node!(K, V) r = new ReservationNode!(K, V)();
//             synchronized (r) {
//                 if (casTabAt(tab, i, null, r)) {
//                     binCount = 1;
//                     Node!(K, V) node = null;
//                     try {
//                         if ((val = remappingFunction.apply(key, null)) != null) {
//                             delta = 1;
//                             node = new Node!(K, V)(h, key, val);
//                         }
//                     } finally {
//                         setTabAt(tab, i, node);
//                     }
//                 }
//             }
//             if (binCount != 0)
//                 break;
//         }
//         else if ((fh = f.hash) == MOVED)
//             tab = helpTransfer(tab, f);
//         else {
//             synchronized (f) {
//                 if (tabAt(tab, i) == f) {
//                     if (fh >= 0) {
//                         binCount = 1;
//                         for (Node!(K, V) e = f, pred = null;; ++binCount) {
//                             K ek;
//                             if (e.hash == h &&
//                                 ((ek = e.key) == key ||
//                                  (ek != null && key.equals(ek)))) {
//                                 val = remappingFunction.apply(key, e.val);
//                                 if (val != null)
//                                     e.val = val;
//                                 else {
//                                     delta = -1;
//                                     Node!(K, V) en = e.next;
//                                     if (pred != null)
//                                         pred.next = en;
//                                     else
//                                         setTabAt(tab, i, en);
//                                 }
//                                 break;
//                             }
//                             pred = e;
//                             if ((e = e.next) == null) {
//                                 val = remappingFunction.apply(key, null);
//                                 if (val != null) {
//                                     if (pred.next != null)
//                                         throw new IllegalStateException("Recursive update");
//                                     delta = 1;
//                                     pred.next = new Node!(K, V)(h, key, val);
//                                 }
//                                 break;
//                             }
//                         }
//                     }
//                     else if (f instanceof TreeBin) {
//                         binCount = 1;
//                         TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                         TreeNode!(K, V) r, p;
//                         if ((r = t.root) != null)
//                             p = r.findTreeNode(h, key, null);
//                         else
//                             p = null;
//                         V pv = (p == null) ? null : p.val;
//                         val = remappingFunction.apply(key, pv);
//                         if (val != null) {
//                             if (p != null)
//                                 p.val = val;
//                             else {
//                                 delta = 1;
//                                 t.putTreeVal(h, key, val);
//                             }
//                         }
//                         else if (p != null) {
//                             delta = -1;
//                             if (t.removeTreeNode(p))
//                                 setTabAt(tab, i, untreeify(t.first));
//                         }
//                     }
//                     else if (f instanceof ReservationNode)
//                         throw new IllegalStateException("Recursive update");
//                 }
//             }
//             if (binCount != 0) {
//                 if (binCount >= TREEIFY_THRESHOLD)
//                     treeifyBin(tab, i);
//                 break;
//             }
//         }
//     }
//     if (delta != 0)
//         addCount((long)delta, binCount);
//     return val;
// }
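// compute sketch: unlike computeIfAbsent/computeIfPresent, the function sees
// the current value or null and may insert, update, or (by returning null)
// remove. Hedged example; the helper names are hypothetical:
//
//     m.compute("k", (k, v) => v is null ? initialValue() : combine(v));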
// /**
// * If the specified key is not already associated with a
// * (non-null) value, associates it with the given value.
// * Otherwise, replaces the value with the results of the given
// * remapping function, or removes if {@code null}. The entire
// * method invocation is performed atomically. Some attempted
// * update operations on this map by other threads may be blocked
// * while computation is in progress, so the computation should be
// * short and simple, and must not attempt to update any other
// * mappings of this Map.
// *
// * @param key key with which the specified value is to be associated
// * @param value the value to use if absent
// * @param remappingFunction the function to recompute a value if present
// * @return the new value associated with the specified key, or null if none
// * @throws NullPointerException if the specified key or the
// *         remappingFunction is null
// * @throws RuntimeException or Error if the remappingFunction does so,
// *         in which case the mapping is unchanged
// */
// public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
//     if (key == null || value == null || remappingFunction == null)
//         throw new NullPointerException();
//     int h = spread(key.hashCode());
//     V val = null;
//     int delta = 0;
//     int binCount = 0;
//     for (Node!(K, V)[] tab = table;;) {
//         Node!(K, V) f; int n, i, fh;
//         if (tab == null || (n = tab.length) == 0)
//             tab = initTable();
//         else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
//             if (casTabAt(tab, i, null, new Node!(K, V)(h, key, value))) {
//                 delta = 1;
//                 val = value;
//                 break;
//             }
//         }
//         else if ((fh = f.hash) == MOVED)
//             tab = helpTransfer(tab, f);
//         else {
//             synchronized (f) {
//                 if (tabAt(tab, i) == f) {
//                     if (fh >= 0) {
//                         binCount = 1;
//                         for (Node!(K, V) e = f, pred = null;; ++binCount) {
//                             K ek;
//                             if (e.hash == h &&
//                                 ((ek = e.key) == key ||
//                                  (ek != null && key.equals(ek)))) {
//                                 val = remappingFunction.apply(e.val, value);
//                                 if (val != null)
//                                     e.val = val;
//                                 else {
//                                     delta = -1;
//                                     Node!(K, V) en = e.next;
//                                     if (pred != null)
//                                         pred.next = en;
//                                     else
//                                         setTabAt(tab, i, en);
//                                 }
//                                 break;
//                             }
//                             pred = e;
//                             if ((e = e.next) == null) {
//                                 delta = 1;
//                                 val = value;
//                                 pred.next = new Node!(K, V)(h, key, val);
//                                 break;
//                             }
//                         }
//                     }
//                     else if (f instanceof TreeBin) {
//                         binCount = 2;
//                         TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                         TreeNode!(K, V) r = t.root;
//                         TreeNode!(K, V) p = (r == null) ? null :
//                             r.findTreeNode(h, key, null);
//                         val = (p == null) ? value :
//                             remappingFunction.apply(p.val, value);
//                         if (val != null) {
//                             if (p != null)
//                                 p.val = val;
//                             else {
//                                 delta = 1;
//                                 t.putTreeVal(h, key, val);
//                             }
//                         }
//                         else if (p != null) {
//                             delta = -1;
//                             if (t.removeTreeNode(p))
//                                 setTabAt(tab, i, untreeify(t.first));
//                         }
//                     }
//                     else if (f instanceof ReservationNode)
//                         throw new IllegalStateException("Recursive update");
//                 }
//             }
//             if (binCount != 0) {
//                 if (binCount >= TREEIFY_THRESHOLD)
//                     treeifyBin(tab, i);
//                 break;
//             }
//         }
//     }
//     if (delta != 0)
//         addCount((long)delta, binCount);
//     return val;
// }
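// merge sketch: a common idiom is value aggregation, e.g. word counting
// (illustrative; assumes an integer-valued map in the port):
//
//     freq.merge(word, 1, (old, one) => old + one); // insert 1, or add to it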
// // Hashtable legacy methods

// /**
// * Tests if some key maps into the specified value in this table.
// *
// * <p>Note that this method is identical in functionality to
// * {@link #containsValue(Object)}, and exists solely to ensure
// * full compatibility with class {@link java.util.Hashtable},
// * which supported this method prior to introduction of the
// * Java Collections Framework.
// *
// * @param value a value to search for
// * @return {@code true} if and only if some key maps to the
// *         {@code value} argument in this table as
// *         determined by the {@code equals} method;
// *         {@code false} otherwise
// * @throws NullPointerException if the specified value is null
// */
// public boolean contains(Object value) {
//     return containsValue(value);
// }

// /**
// * Returns an enumeration of the keys in this table.
// *
// * @return an enumeration of the keys in this table
// * @see #keySet()
// */
// public Enumeration<K> keys() {
//     Node!(K, V)[] t;
//     int f = (t = table) == null ? 0 : t.length;
//     return new KeyIterator!(K, V)(t, f, 0, f, this);
// }

// /**
// * Returns an enumeration of the values in this table.
// *
// * @return an enumeration of the values in this table
// * @see #values()
// */
// public Enumeration<V> elements() {
//     Node!(K, V)[] t;
//     int f = (t = table) == null ? 0 : t.length;
//     return new ValueIterator!(K, V)(t, f, 0, f, this);
// }

// // ConcurrentHashMap-only methods

// /**
// * Returns the number of mappings. This method should be used
// * instead of {@link #size} because a ConcurrentHashMap may
// * contain more mappings than can be represented as an int. The
// * value returned is an estimate; the actual count may differ if
// * there are concurrent insertions or removals.
// *
// * @return the number of mappings
// */
// public long mappingCount() {
//     long n = sumCount();
//     return (n < 0L) ? 0L : n; // ignore transient negative values
// }

// /**
// * Creates a new {@link Set} backed by a ConcurrentHashMap
// * from the given type to {@code Boolean.TRUE}.
// *
// * @param <K> the element type of the returned set
// * @return the new set
// */
// public static <K> KeySetView<K,Boolean> newKeySet() {
//     return new KeySetView<K,Boolean>
//         (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
// }

// /**
// * Creates a new {@link Set} backed by a ConcurrentHashMap
// * from the given type to {@code Boolean.TRUE}.
// *
// * @param initialCapacity The implementation performs internal
// *        sizing to accommodate this many elements.
// * @param <K> the element type of the returned set
// * @return the new set
// * @throws IllegalArgumentException if the initial capacity of
// *         elements is negative
// */
// public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
//     return new KeySetView<K,Boolean>
//         (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
// }

// /**
// * Returns a {@link Set} view of the keys in this map, using the
// * given common mapped value for any additions (i.e., {@link
// * Collection#add} and {@link Collection#addAll(Collection)}).
// * This is of course only appropriate if it is acceptable to use
// * the same value for all additions from this view.
// *
// * @param mappedValue the mapped value to use for any additions
// * @return the set view
// * @throws NullPointerException if the mappedValue is null
// */
// public KeySetView!(K, V) keySet(V mappedValue) {
//     if (mappedValue == null)
//         throw new NullPointerException();
//     return new KeySetView!(K, V)(this, mappedValue);
// }
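// Set-projection sketch (illustrative; the ported signature is assumed, not
// confirmed): newKeySet yields a concurrent Set backed by a fresh
// map-to-Boolean.TRUE, while keySet(mappedValue) is a live view of this map
// whose add/addAll insert the fixed mapped value:
//
//     auto seen = newKeySet!(string)();      // hypothetical ported signature
//     seen.add("x");                         // stores ("x", Boolean.TRUE)
//     auto keys = map.keySet(defaultValue);  // keys.add(k) puts (k, defaultValue)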
// /* ---------------- Special Nodes -------------- */

// /**
// * A node inserted at head of bins during transfer operations.
// */
// static final class ForwardingNode!(K, V) extends Node!(K, V) {
//     final Node!(K, V)[] nextTable;
//     ForwardingNode(Node!(K, V)[] tab) {
//         super(MOVED, null, null);
//         this.nextTable = tab;
//     }
//
//     Node!(K, V) find(int h, Object k) {
//         // loop to avoid arbitrarily deep recursion on forwarding nodes
//         outer: for (Node!(K, V)[] tab = nextTable;;) {
//             Node!(K, V) e; int n;
//             if (k == null || tab == null || (n = tab.length) == 0 ||
//                 (e = tabAt(tab, (n - 1) & h)) == null)
//                 return null;
//             for (;;) {
//                 int eh; K ek;
//                 if ((eh = e.hash) == h &&
//                     ((ek = e.key) == k || (ek != null && k.equals(ek))))
//                     return e;
//                 if (eh < 0) {
//                     if (e instanceof ForwardingNode) {
//                         tab = ((ForwardingNode!(K, V))e).nextTable;
//                         continue outer;
//                     }
//                     else
//                         return e.find(h, k);
//                 }
//                 if ((e = e.next) == null)
//                     return null;
//             }
//         }
//     }
// }

// /**
// * A place-holder node used in computeIfAbsent and compute.
// */
// static final class ReservationNode!(K, V) extends Node!(K, V) {
//     ReservationNode() {
//         super(RESERVED, null, null);
//     }
//
//     Node!(K, V) find(int h, Object k) {
//         return null;
//     }
// }

// /* ---------------- Table Initialization and Resizing -------------- */

// /**
// * Returns the stamp bits for resizing a table of size n.
// * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
// */
// static final int resizeStamp(int n) {
//     return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
// }

// /**
// * Initializes table, using the size recorded in sizeCtl.
// */
// private final Node!(K, V)[] initTable() {
//     Node!(K, V)[] tab; int sc;
//     while ((tab = table) == null || tab.length == 0) {
//         if ((sc = sizeCtl) < 0)
//             Thread.yield(); // lost initialization race; just spin
//         else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
//             try {
//                 if ((tab = table) == null || tab.length == 0) {
//                     int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
//                     Node!(K, V)[] nt = (Node!(K, V)[])new Node<?,?>[n];
//                     table = tab = nt;
//                     sc = n - (n >>> 2);
//                 }
//             } finally {
//                 sizeCtl = sc;
//             }
//             break;
//         }
//     }
//     return tab;
// }
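// Worked example for resizeStamp (assuming the JDK constants, where
// RESIZE_STAMP_BITS = 16 and RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS):
// for n = 16, numberOfLeadingZeros(16) = 27, so the stamp is
// 27 | (1 << 15) = 0x801B; shifted left by 16 it becomes 0x801B0000, a
// negative int, which is how a negative sizeCtl encodes "resize in progress"
// together with the resizer count in its low bits.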
// /**
// * Adds to count, and if table is too small and not already
// * resizing, initiates transfer. If already resizing, helps
// * perform transfer if work is available. Rechecks occupancy
// * after a transfer to see if another resize is already needed
// * because resizings are lagging additions.
// *
// * @param x the count to add
// * @param check if <0, don't check resize, if <= 1 only check if uncontended
// */
// private final void addCount(long x, int check) {
//     CounterCell[] cs; long b, s;
//     if ((cs = counterCells) != null ||
//         !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
//         CounterCell c; long v; int m;
//         boolean uncontended = true;
//         if (cs == null || (m = cs.length - 1) < 0 ||
//             (c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
//             !(uncontended =
//               U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
//             fullAddCount(x, uncontended);
//             return;
//         }
//         if (check <= 1)
//             return;
//         s = sumCount();
//     }
//     if (check >= 0) {
//         Node!(K, V)[] tab, nt; int n, sc;
//         while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
//                (n = tab.length) < MAXIMUM_CAPACITY) {
//             int rs = resizeStamp(n) << RESIZE_STAMP_SHIFT;
//             if (sc < 0) {
//                 if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
//                     (nt = nextTable) == null || transferIndex <= 0)
//                     break;
//                 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
//                     transfer(tab, nt);
//             }
//             else if (U.compareAndSetInt(this, SIZECTL, sc, rs + 2))
//                 transfer(tab, null);
//             s = sumCount();
//         }
//     }
// }

// /**
// * Helps transfer if a resize is in progress.
// */
// final Node!(K, V)[] helpTransfer(Node!(K, V)[] tab, Node!(K, V) f) {
//     Node!(K, V)[] nextTab; int sc;
//     if (tab != null && (f instanceof ForwardingNode) &&
//         (nextTab = ((ForwardingNode!(K, V))f).nextTable) != null) {
//         int rs = resizeStamp(tab.length) << RESIZE_STAMP_SHIFT;
//         while (nextTab == nextTable && table == tab &&
//                (sc = sizeCtl) < 0) {
//             if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
//                 transferIndex <= 0)
//                 break;
//             if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
//                 transfer(tab, nextTab);
//                 break;
//             }
//         }
//         return nextTab;
//     }
//     return table;
// }

// /**
// * Tries to presize table to accommodate the given number of elements.
// *
// * @param size number of elements (doesn't need to be perfectly accurate)
// */
// private final void tryPresize(int size) {
//     int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
//         tableSizeFor(size + (size >>> 1) + 1);
//     int sc;
//     while ((sc = sizeCtl) >= 0) {
//         Node!(K, V)[] tab = table; int n;
//         if (tab == null || (n = tab.length) == 0) {
//             n = (sc > c) ? sc : c;
//             if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
//                 try {
//                     if (table == tab) {
//                         Node!(K, V)[] nt = (Node!(K, V)[])new Node<?,?>[n];
//                         table = nt;
//                         sc = n - (n >>> 2);
//                     }
//                 } finally {
//                     sizeCtl = sc;
//                 }
//             }
//         }
//         else if (c <= sc || n >= MAXIMUM_CAPACITY)
//             break;
//         else if (tab == table) {
//             int rs = resizeStamp(n);
//             if (U.compareAndSetInt(this, SIZECTL, sc,
//                                    (rs << RESIZE_STAMP_SHIFT) + 2))
//                 transfer(tab, null);
//         }
//     }
// }
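// Sizing arithmetic sketch for tryPresize (assuming the JDK's tableSizeFor):
// a request for size = 100 computes tableSizeFor(100 + 50 + 1) = 256, the
// smallest power of two that can hold 1.5x the estimate, which matches the
// 0.75 load-factor resize threshold (256 * 0.75 = 192 > 100).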
// /**
// * Moves and/or copies the nodes in each bin to new table. See
// * above for explanation.
// */
// private final void transfer(Node!(K, V)[] tab, Node!(K, V)[] nextTab) {
//     int n = tab.length, stride;
//     if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
//         stride = MIN_TRANSFER_STRIDE; // subdivide range
//     if (nextTab == null) { // initiating
//         try {
//             Node!(K, V)[] nt = (Node!(K, V)[])new Node<?,?>[n << 1];
//             nextTab = nt;
//         } catch (Throwable ex) { // try to cope with OOME
//             sizeCtl = Integer.MAX_VALUE;
//             return;
//         }
//         nextTable = nextTab;
//         transferIndex = n;
//     }
//     int nextn = nextTab.length;
//     ForwardingNode!(K, V) fwd = new ForwardingNode!(K, V)(nextTab);
//     boolean advance = true;
//     boolean finishing = false; // to ensure sweep before committing nextTab
//     for (int i = 0, bound = 0;;) {
//         Node!(K, V) f; int fh;
//         while (advance) {
//             int nextIndex, nextBound;
//             if (--i >= bound || finishing)
//                 advance = false;
//             else if ((nextIndex = transferIndex) <= 0) {
//                 i = -1;
//                 advance = false;
//             }
//             else if (U.compareAndSetInt
//                      (this, TRANSFERINDEX, nextIndex,
//                       nextBound = (nextIndex > stride ?
//                                    nextIndex - stride : 0))) {
//                 bound = nextBound;
//                 i = nextIndex - 1;
//                 advance = false;
//             }
//         }
//         if (i < 0 || i >= n || i + n >= nextn) {
//             int sc;
//             if (finishing) {
//                 nextTable = null;
//                 table = nextTab;
//                 sizeCtl = (n << 1) - (n >>> 1);
//                 return;
//             }
//             if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
//                 if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
//                     return;
//                 finishing = advance = true;
//                 i = n; // recheck before commit
//             }
//         }
//         else if ((f = tabAt(tab, i)) == null)
//             advance = casTabAt(tab, i, null, fwd);
//         else if ((fh = f.hash) == MOVED)
//             advance = true; // already processed
//         else {
//             synchronized (f) {
//                 if (tabAt(tab, i) == f) {
//                     Node!(K, V) ln, hn;
//                     if (fh >= 0) {
//                         int runBit = fh & n;
//                         Node!(K, V) lastRun = f;
//                         for (Node!(K, V) p = f.next; p != null; p = p.next) {
//                             int b = p.hash & n;
//                             if (b != runBit) {
//                                 runBit = b;
//                                 lastRun = p;
//                             }
//                         }
//                         if (runBit == 0) {
//                             ln = lastRun;
//                             hn = null;
//                         }
//                         else {
//                             hn = lastRun;
//                             ln = null;
//                         }
//                         for (Node!(K, V) p = f; p != lastRun; p = p.next) {
//                             int ph = p.hash; K pk = p.key; V pv = p.val;
//                             if ((ph & n) == 0)
//                                 ln = new Node!(K, V)(ph, pk, pv, ln);
//                             else
//                                 hn = new Node!(K, V)(ph, pk, pv, hn);
//                         }
//                         setTabAt(nextTab, i, ln);
//                         setTabAt(nextTab, i + n, hn);
//                         setTabAt(tab, i, fwd);
//                         advance = true;
//                     }
//                     else if (f instanceof TreeBin) {
//                         TreeBin!(K, V) t = (TreeBin!(K, V))f;
//                         TreeNode!(K, V) lo = null, loTail = null;
//                         TreeNode!(K, V) hi = null, hiTail = null;
//                         int lc = 0, hc = 0;
//                         for (Node!(K, V) e = t.first; e != null; e = e.next) {
//                             int h = e.hash;
//                             TreeNode!(K, V) p = new TreeNode!(K, V)
//                                 (h, e.key, e.val, null, null);
//                             if ((h & n) == 0) {
//                                 if ((p.prev = loTail) == null)
//                                     lo = p;
//                                 else
//                                     loTail.next = p;
//                                 loTail = p;
//                                 ++lc;
//                             }
//                             else {
//                                 if ((p.prev = hiTail) == null)
//                                     hi = p;
//                                 else
//                                     hiTail.next = p;
//                                 hiTail = p;
//                                 ++hc;
//                             }
//                         }
//                         ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
//                             (hc != 0) ? new TreeBin!(K, V)(lo) : t;
//                         hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
//                             (lc != 0) ? new TreeBin!(K, V)(hi) : t;
//                         setTabAt(nextTab, i, ln);
//                         setTabAt(nextTab, i + n, hn);
//                         setTabAt(tab, i, fwd);
//                         advance = true;
//                     }
//                     else if (f instanceof ReservationNode)
//                         throw new IllegalStateException("Recursive update");
//                 }
//             }
//         }
//     }
// }

// /* ---------------- Counter support -------------- */

// /**
// * A padded cell for distributing counts. Adapted from LongAdder
// * and Striped64. See their internal docs for explanation.
// */
// @jdk.internal.vm.annotation.Contended static final class CounterCell {
//     volatile long value;
//     CounterCell(long x) { value = x; }
// }

// final long sumCount() {
//     CounterCell[] cs = counterCells;
//     long sum = baseCount;
//     if (cs != null) {
//         for (CounterCell c : cs)
//             if (c != null)
//                 sum += c.value;
//     }
//     return sum;
// }

// // See LongAdder version for explanation
// private final void fullAddCount(long x, boolean wasUncontended) {
//     int h;
//     if ((h = ThreadLocalRandom.getProbe()) == 0) {
//         ThreadLocalRandom.localInit(); // force initialization
//         h = ThreadLocalRandom.getProbe();
//         wasUncontended = true;
//     }
//     boolean collide = false; // True if last slot nonempty
//     for (;;) {
//         CounterCell[] cs; CounterCell c; int n; long v;
//         if ((cs = counterCells) != null && (n = cs.length) > 0) {
//             if ((c = cs[(n - 1) & h]) == null) {
//                 if (cellsBusy == 0) { // Try to attach new Cell
//                     CounterCell r = new CounterCell(x); // Optimistic create
//                     if (cellsBusy == 0 &&
//                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
//                         boolean created = false;
//                         try { // Recheck under lock
//                             CounterCell[] rs; int m, j;
//                             if ((rs = counterCells) != null &&
//                                 (m = rs.length) > 0 &&
//                                 rs[j = (m - 1) & h] == null) {
//                                 rs[j] = r;
//                                 created = true;
//                             }
//                         } finally {
//                             cellsBusy = 0;
//                         }
//                         if (created)
//                             break;
//                         continue; // Slot is now non-empty
//                     }
//                 }
//                 collide = false;
//             }
//             else if (!wasUncontended) // CAS already known to fail
//                 wasUncontended = true; // Continue after rehash
//             else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
//                 break;
//             else if (counterCells != cs || n >= NCPU)
//                 collide = false; // At max size or stale
//             else if (!collide)
//                 collide = true;
//             else if (cellsBusy == 0 &&
//                      U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
//                 try {
//                     if (counterCells == cs) // Expand table unless stale
//                         counterCells = Arrays.copyOf(cs, n << 1);
//                 } finally {
//                     cellsBusy = 0;
//                 }
//                 collide = false;
//                 continue; // Retry with expanded table
//             }
//             h = ThreadLocalRandom.advanceProbe(h);
//         }
//         else if (cellsBusy == 0 && counterCells == cs &&
//                  U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
//             boolean init = false;
//             try { // Initialize table
//                 if (counterCells == cs) {
//                     CounterCell[] rs = new CounterCell[2];
//                     rs[h & 1] = new CounterCell(x);
//                     counterCells = rs;
//                     init = true;
//                 }
//             } finally {
//                 cellsBusy = 0;
//             }
//             if (init)
//                 break;
//         }
//         else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
//             break; // Fall back on using base
//     }
// }

// /* ---------------- Conversion from/to TreeBins -------------- */

// /**
// * Replaces all linked nodes in bin at given index unless table is
// * too small, in which case resizes instead.
// */
// private final void treeifyBin(Node!(K, V)[] tab, int index) {
//     Node!(K, V) b; int n;
//     if (tab != null) {
//         if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
//             tryPresize(n << 1);
//         else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
//             synchronized (b) {
//                 if (tabAt(tab, index) == b) {
//                     TreeNode!(K, V) hd = null, tl = null;
//                     for (Node!(K, V) e = b; e != null; e = e.next) {
//                         TreeNode!(K, V) p =
//                             new TreeNode!(K, V)(e.hash, e.key, e.val,
//                                                 null, null);
//                         if ((p.prev = tl) == null)
//                             hd = p;
//                         else
//                             tl.next = p;
//                         tl = p;
//                     }
//                     setTabAt(tab, index, new TreeBin!(K, V)(hd));
//                 }
//             }
//         }
//     }
// }

// /**
// * Returns a list of non-TreeNodes replacing those in given list.
// */
// static !(K, V) Node!(K, V) untreeify(Node!(K, V) b) {
//     Node!(K, V) hd = null, tl = null;
//     for (Node!(K, V) q = b; q != null; q = q.next) {
//         Node!(K, V) p = new Node!(K, V)(q.hash, q.key, q.val);
//         if (tl == null)
//             hd = p;
//         else
//             tl.next = p;
//         tl = p;
//     }
//     return hd;
// }

// /* ---------------- TreeNodes -------------- */

// /**
// * Nodes for use in TreeBins.
// */
// static final class TreeNode!(K, V) extends Node!(K, V) {
//     TreeNode!(K, V) parent; // red-black tree links
//     TreeNode!(K, V) left;
//     TreeNode!(K, V) right;
//     TreeNode!(K, V) prev; // needed to unlink next upon deletion
//     boolean red;
//
//     TreeNode(int hash, K key, V val, Node!(K, V) next,
//              TreeNode!(K, V) parent) {
//         super(hash, key, val, next);
//         this.parent = parent;
//     }
//
//     Node!(K, V) find(int h, Object k) {
//         return findTreeNode(h, k, null);
//     }
//
//     /**
//      * Returns the TreeNode (or null if not found) for the given key
//      * starting at given root.
//      */
//     final TreeNode!(K, V) findTreeNode(int h, Object k, Class<?> kc) {
//         if (k != null) {
//             TreeNode!(K, V) p = this;
//             do {
//                 int ph, dir; K pk; TreeNode!(K, V) q;
//                 TreeNode!(K, V) pl = p.left, pr = p.right;
//                 if ((ph = p.hash) > h)
//                     p = pl;
//                 else if (ph < h)
//                     p = pr;
//                 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
//                     return p;
//                 else if (pl == null)
//                     p = pr;
//                 else if (pr == null)
//                     p = pl;
//                 else if ((kc != null ||
//                           (kc = comparableClassFor(k)) != null) &&
//                          (dir = compareComparables(kc, k, pk)) != 0)
//                     p = (dir < 0) ? pl : pr;
//                 else if ((q = pr.findTreeNode(h, k, kc)) != null)
//                     return q;
//                 else
//                     p = pl;
//             } while (p != null);
//         }
//         return null;
//     }
// }
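// Threshold recap (JDK constants, assumed unchanged in this port): a bin is
// treeified only when it holds at least TREEIFY_THRESHOLD (8) nodes AND the
// table has at least MIN_TREEIFY_CAPACITY (64) slots; smaller tables are
// resized instead (see treeifyBin above). During transfer, split bins with
// at most UNTREEIFY_THRESHOLD (6) nodes revert to plain lists via untreeify.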
// /* ---------------- TreeBins -------------- */

// /**
// * TreeNodes used at the heads of bins. TreeBins do not hold user
// * keys or values, but instead point to a list of TreeNodes and
// * their root. They also maintain a parasitic read-write lock
// * forcing writers (who hold bin lock) to wait for readers (who do
// * not) to complete before tree restructuring operations.
// */
// static final class TreeBin!(K, V) extends Node!(K, V) {
//     TreeNode!(K, V) root;
//     volatile TreeNode!(K, V) first;
//     volatile Thread waiter;
//     volatile int lockState;
//     // values for lockState
//     static final int WRITER = 1; // set while holding write lock
//     static final int WAITER = 2; // set when waiting for write lock
//     static final int READER = 4; // increment value for setting read lock
//
//     /**
//      * Tie-breaking utility for ordering insertions when equal
//      * hashCodes and non-comparable. We don't require a total
//      * order, just a consistent insertion rule to maintain
//      * equivalence across rebalancings. Tie-breaking further than
//      * necessary simplifies testing a bit.
//      */
//     static int tieBreakOrder(Object a, Object b) {
//         int d;
//         if (a == null || b == null ||
//             (d = a.getClass().getName().
//              compareTo(b.getClass().getName())) == 0)
//             d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
//                  -1 : 1);
//         return d;
//     }
//
//     /**
//      * Creates bin with initial set of nodes headed by b.
//      */
//     TreeBin(TreeNode!(K, V) b) {
//         super(TREEBIN, null, null);
//         this.first = b;
//         TreeNode!(K, V) r = null;
//         for (TreeNode!(K, V) x = b, next; x != null; x = next) {
//             next = (TreeNode!(K, V))x.next;
//             x.left = x.right = null;
//             if (r == null) {
//                 x.parent = null;
//                 x.red = false;
//                 r = x;
//             }
//             else {
//                 K k = x.key;
//                 int h = x.hash;
//                 Class<?> kc = null;
//                 for (TreeNode!(K, V) p = r;;) {
//                     int dir, ph;
//                     K pk = p.key;
//                     if ((ph = p.hash) > h)
//                         dir = -1;
//                     else if (ph < h)
//                         dir = 1;
//                     else if ((kc == null &&
//                               (kc = comparableClassFor(k)) == null) ||
//                              (dir = compareComparables(kc, k, pk)) == 0)
//                         dir = tieBreakOrder(k, pk);
//                     TreeNode!(K, V) xp = p;
//                     if ((p = (dir <= 0) ? p.left : p.right) == null) {
//                         x.parent = xp;
//                         if (dir <= 0)
//                             xp.left = x;
//                         else
//                             xp.right = x;
//                         r = balanceInsertion(r, x);
//                         break;
//                     }
//                 }
//             }
//         }
//         this.root = r;
//         assert checkInvariants(root);
//     }
//
//     /**
//      * Acquires write lock for tree restructuring.
//      */
//     private final void lockRoot() {
//         if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
//             contendedLock(); // offload to separate method
//     }
//
//     /**
//      * Releases write lock for tree restructuring.
//      */
//     private final void unlockRoot() {
//         lockState = 0;
//     }
//
//     /**
//      * Possibly blocks awaiting root lock.
//      */
//     private final void contendedLock() {
//         boolean waiting = false;
//         for (int s;;) {
//             if (((s = lockState) & ~WAITER) == 0) {
//                 if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
//                     if (waiting)
//                         waiter = null;
//                     return;
//                 }
//             }
//             else if ((s & WAITER) == 0) {
//                 if (U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
//                     waiting = true;
//                     waiter = Thread.currentThread();
//                 }
//             }
//             else if (waiting)
//                 LockSupport.park(this);
//         }
//     }
//
//     /**
//      * Returns matching node or null if none. Tries to search
//      * using tree comparisons from root, but continues linear
//      * search when lock not available.
//      */
//     final Node!(K, V) find(int h, Object k) {
//         if (k != null) {
//             for (Node!(K, V) e = first; e != null; ) {
//                 int s; K ek;
//                 if (((s = lockState) & (WAITER|WRITER)) != 0) {
//                     if (e.hash == h &&
//                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
//                         return e;
//                     e = e.next;
//                 }
//                 else if (U.compareAndSetInt(this, LOCKSTATE, s,
//                                             s + READER)) {
//                     TreeNode!(K, V) r, p;
//                     try {
//                         p = ((r = root) == null ? null :
//                              r.findTreeNode(h, k, null));
//                     } finally {
//                         Thread w;
//                         if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
//                             (READER|WAITER) && (w = waiter) != null)
//                             LockSupport.unpark(w);
//                     }
//                     return p;
//                 }
//             }
//         }
//         return null;
//     }
//
//     /**
//      * Finds or adds a node.
//      * @return null if added
//      */
//     final TreeNode!(K, V) putTreeVal(int h, K k, V v) {
//         Class<?> kc = null;
//         boolean searched = false;
//         for (TreeNode!(K, V) p = root;;) {
//             int dir, ph; K pk;
//             if (p == null) {
//                 first = root = new TreeNode!(K, V)(h, k, v, null, null);
//                 break;
//             }
//             else if ((ph = p.hash) > h)
//                 dir = -1;
//             else if (ph < h)
//                 dir = 1;
//             else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
//                 return p;
//             else if ((kc == null &&
//                       (kc = comparableClassFor(k)) == null) ||
//                      (dir = compareComparables(kc, k, pk)) == 0) {
//                 if (!searched) {
//                     TreeNode!(K, V) q, ch;
//                     searched = true;
//                     if (((ch = p.left) != null &&
//                          (q = ch.findTreeNode(h, k, kc)) != null) ||
//                         ((ch = p.right) != null &&
//                          (q = ch.findTreeNode(h, k, kc)) != null))
//                         return q;
//                 }
//                 dir = tieBreakOrder(k, pk);
//             }
//
//             TreeNode!(K, V) xp = p;
//             if ((p = (dir <= 0) ? p.left : p.right) == null) {
//                 TreeNode!(K, V) x, f = first;
//                 first = x = new TreeNode!(K, V)(h, k, v, f, xp);
//                 if (f != null)
//                     f.prev = x;
//                 if (dir <= 0)
//                     xp.left = x;
//                 else
//                     xp.right = x;
//                 if (!xp.red)
//                     x.red = true;
//                 else {
//                     lockRoot();
//                     try {
//                         root = balanceInsertion(root, x);
//                     } finally {
//                         unlockRoot();
//                     }
//                 }
//                 break;
//             }
//         }
//         assert checkInvariants(root);
//         return null;
//     }
//
//     /**
//      * Removes the given node, which must be present before this
//      * call. This is messier than typical red-black deletion code
//      * because we cannot swap the contents of an interior node
//      * with a leaf successor that is pinned by "next" pointers
//      * that are accessible independently of lock. So instead we
//      * swap the tree linkages.
//      *
//      * @return true if now too small, so should be untreeified
//      */
//     final boolean removeTreeNode(TreeNode!(K, V) p) {
//         TreeNode!(K, V) next = (TreeNode!(K, V))p.next;
//         TreeNode!(K, V) pred = p.prev; // unlink traversal pointers
//         TreeNode!(K, V) r, rl;
//         if (pred == null)
//             first = next;
//         else
//             pred.next = next;
//         if (next != null)
//             next.prev = pred;
//         if (first == null) {
//             root = null;
//             return true;
//         }
//         if ((r = root) == null || r.right == null || // too small
//             (rl = r.left) == null || rl.left == null)
//             return true;
//         lockRoot();
//         try {
//             TreeNode!(K, V) replacement;
//             TreeNode!(K, V) pl = p.left;
//             TreeNode!(K, V) pr = p.right;
//             if (pl != null && pr != null) {
//                 TreeNode!(K, V) s = pr, sl;
//                 while ((sl = s.left) != null) // find successor
//                     s = sl;
//                 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
//                 TreeNode!(K, V) sr = s.right;
//                 TreeNode!(K, V) pp = p.parent;
//                 if (s == pr) { // p was s's direct parent
//                     p.parent = s;
//                     s.right = p;
//                 }
//                 else {
//                     TreeNode!(K, V) sp = s.parent;
//                     if ((p.parent = sp) != null) {
//                         if (s == sp.left)
//                             sp.left = p;
//                         else
//                             sp.right = p;
//                     }
//                     if ((s.right = pr) != null)
//                         pr.parent = s;
//                 }
//                 p.left = null;
//                 if ((p.right = sr) != null)
//                     sr.parent = p;
//                 if ((s.left = pl) != null)
//                     pl.parent = s;
//                 if ((s.parent = pp) == null)
//                     r = s;
//                 else if (p == pp.left)
//                     pp.left = s;
//                 else
//                     pp.right = s;
//                 if (sr != null)
//                     replacement = sr;
//                 else
//                     replacement = p;
//             }
//             else if (pl != null)
//                 replacement = pl;
//             else if (pr != null)
//                 replacement = pr;
//             else
//                 replacement = p;
//             if (replacement != p) {
//                 TreeNode!(K, V) pp = replacement.parent = p.parent;
//                 if (pp == null)
//                     r = replacement;
//                 else if (p == pp.left)
//                     pp.left = replacement;
//                 else
//                     pp.right = replacement;
//                 p.left = p.right = p.parent = null;
//             }
//
//             root = (p.red) ? r : balanceDeletion(r, replacement);
//
//             if (p == replacement) { // detach pointers
//                 TreeNode!(K, V) pp;
//                 if ((pp = p.parent) != null) {
//                     if (p == pp.left)
//                         pp.left = null;
//                     else if (p == pp.right)
//                         pp.right = null;
//                     p.parent = null;
//                 }
//             }
//         } finally {
//             unlockRoot();
//         }
//         assert checkInvariants(root);
//         return false;
//     }
//
//     /* ------------------------------------------------------------ */
//     // Red-black tree methods, all adapted from CLR
//
//     static !(K, V) TreeNode!(K, V) rotateLeft(TreeNode!(K, V) root,
//                                               TreeNode!(K, V) p) {
//         TreeNode!(K, V) r, pp, rl;
//         if (p != null && (r = p.right) != null) {
//             if ((rl = p.right = r.left) != null)
//                 rl.parent = p;
//             if ((pp = r.parent = p.parent) == null)
//                 (root = r).red = false;
//             else if (pp.left == p)
//                 pp.left = r;
//             else
//                 pp.right = r;
//             r.left = p;
//             p.parent = r;
//         }
//         return root;
//     }
//
//     static !(K, V) TreeNode!(K, V) rotateRight(TreeNode!(K, V) root,
//                                                TreeNode!(K, V) p) {
//         TreeNode!(K, V) l, pp, lr;
//         if (p != null && (l = p.left) != null) {
//             if ((lr = p.left = l.right) != null)
//                 lr.parent = p;
//             if ((pp = l.parent = p.parent) == null)
//                 (root = l).red = false;
//             else if (pp.right == p)
//                 pp.right = l;
//             else
//                 pp.left = l;
//             l.right = p;
//             p.parent = l;
//         }
//         return root;
//     }
//
//     static !(K, V) TreeNode!(K, V) balanceInsertion(TreeNode!(K, V) root,
//                                                     TreeNode!(K, V) x) {
//         x.red = true;
//         for (TreeNode!(K, V) xp, xpp, xppl, xppr;;) {
//             if ((xp = x.parent) == null) {
//                 x.red = false;
//                 return x;
//             }
//             else if (!xp.red || (xpp = xp.parent) == null)
//                 return root;
//             if (xp == (xppl = xpp.left)) {
//                 if ((xppr = xpp.right) != null && xppr.red) {
//                     xppr.red = false;
//                     xp.red = false;
//                     xpp.red = true;
//                     x = xpp;
//                 }
//                 else {
//                     if (x == xp.right) {
//                         root = rotateLeft(root, x = xp);
//                         xpp = (xp = x.parent) == null ? null : xp.parent;
//                     }
//                     if (xp != null) {
//                         xp.red = false;
//                         if (xpp != null) {
//                             xpp.red = true;
//                             root = rotateRight(root, xpp);
//                         }
//                     }
//                 }
//             }
//             else {
//                 if (xppl != null && xppl.red) {
//                     xppl.red = false;
//                     xp.red = false;
//                     xpp.red = true;
//                     x = xpp;
//                 }
//                 else {
//                     if (x == xp.left) {
//                         root = rotateRight(root, x = xp);
null : xp.parent; 3128 // } 3129 // if (xp != null) { 3130 // xp.red = false; 3131 // if (xpp != null) { 3132 // xpp.red = true; 3133 // root = rotateLeft(root, xpp); 3134 // } 3135 // } 3136 // } 3137 // } 3138 // } 3139 // } 3140 3141 // static !(K, V) TreeNode!(K, V) balanceDeletion(TreeNode!(K, V) root, 3142 // TreeNode!(K, V) x) { 3143 // for (TreeNode!(K, V) xp, xpl, xpr;;) { 3144 // if (x == null || x == root) 3145 // return root; 3146 // else if ((xp = x.parent) == null) { 3147 // x.red = false; 3148 // return x; 3149 // } 3150 // else if (x.red) { 3151 // x.red = false; 3152 // return root; 3153 // } 3154 // else if ((xpl = xp.left) == x) { 3155 // if ((xpr = xp.right) != null && xpr.red) { 3156 // xpr.red = false; 3157 // xp.red = true; 3158 // root = rotateLeft(root, xp); 3159 // xpr = (xp = x.parent) == null ? null : xp.right; 3160 // } 3161 // if (xpr == null) 3162 // x = xp; 3163 // else { 3164 // TreeNode!(K, V) sl = xpr.left, sr = xpr.right; 3165 // if ((sr == null || !sr.red) && 3166 // (sl == null || !sl.red)) { 3167 // xpr.red = true; 3168 // x = xp; 3169 // } 3170 // else { 3171 // if (sr == null || !sr.red) { 3172 // if (sl != null) 3173 // sl.red = false; 3174 // xpr.red = true; 3175 // root = rotateRight(root, xpr); 3176 // xpr = (xp = x.parent) == null ? 3177 // null : xp.right; 3178 // } 3179 // if (xpr != null) { 3180 // xpr.red = (xp == null) ? false : xp.red; 3181 // if ((sr = xpr.right) != null) 3182 // sr.red = false; 3183 // } 3184 // if (xp != null) { 3185 // xp.red = false; 3186 // root = rotateLeft(root, xp); 3187 // } 3188 // x = root; 3189 // } 3190 // } 3191 // } 3192 // else { // symmetric 3193 // if (xpl != null && xpl.red) { 3194 // xpl.red = false; 3195 // xp.red = true; 3196 // root = rotateRight(root, xp); 3197 // xpl = (xp = x.parent) == null ? null : xp.left; 3198 // } 3199 // if (xpl == null) 3200 // x = xp; 3201 // else { 3202 // TreeNode!(K, V) sl = xpl.left, sr = xpl.right; 3203 // if ((sl == null || !sl.red) && 3204 // (sr == null || !sr.red)) { 3205 // xpl.red = true; 3206 // x = xp; 3207 // } 3208 // else { 3209 // if (sl == null || !sl.red) { 3210 // if (sr != null) 3211 // sr.red = false; 3212 // xpl.red = true; 3213 // root = rotateLeft(root, xpl); 3214 // xpl = (xp = x.parent) == null ? 3215 // null : xp.left; 3216 // } 3217 // if (xpl != null) { 3218 // xpl.red = (xp == null) ? false : xp.red; 3219 // if ((sl = xpl.left) != null) 3220 // sl.red = false; 3221 // } 3222 // if (xp != null) { 3223 // xp.red = false; 3224 // root = rotateRight(root, xp); 3225 // } 3226 // x = root; 3227 // } 3228 // } 3229 // } 3230 // } 3231 // } 3232 3233 // /** 3234 // * Checks invariants recursively for the tree of Nodes rooted at t. 
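// * Returns false if any structural property fails to hold: broken
// * prev/next links, a child whose parent pointer does not point back,
// * hash ordering violated between parent and child, or a red node
// * with two red children.
// * <p>Intended usage (a sketch mirroring the call site in
// * removeTreeNode above; the check only runs when assertions are
// * enabled):
// * <pre> {@code
// * assert checkInvariants(root); // after any structural mutation
// * }</pre>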
// */
// static !(K, V) boolean checkInvariants(TreeNode!(K, V) t) {
//     TreeNode!(K, V) tp = t.parent, tl = t.left, tr = t.right,
//         tb = t.prev, tn = (TreeNode!(K, V))t.next;
//     if (tb != null && tb.next != t)
//         return false;
//     if (tn != null && tn.prev != t)
//         return false;
//     if (tp != null && t != tp.left && t != tp.right)
//         return false;
//     if (tl != null && (tl.parent != t || tl.hash > t.hash))
//         return false;
//     if (tr != null && (tr.parent != t || tr.hash < t.hash))
//         return false;
//     if (t.red && tl != null && tl.red && tr != null && tr.red)
//         return false;
//     if (tl != null && !checkInvariants(tl))
//         return false;
//     if (tr != null && !checkInvariants(tr))
//         return false;
//     return true;
// }

// private static final long LOCKSTATE = U.objectFieldOffset(
//     TreeBin.class, "lockState");
// }

// /* ----------------Table Traversal -------------- */

// /**
// * Records the table, its length, and current traversal index for a
// * traverser that must process a region of a forwarded table before
// * proceeding with the current table.
// */
// static final class TableStack!(K, V) {
//     int length;
//     int index;
//     Node!(K, V)[] tab;
//     TableStack!(K, V) next;
// }

// /**
// * Encapsulates traversal for methods such as containsValue; also
// * serves as a base class for other iterators and spliterators.
// *
// * Method advance visits, exactly once, each still-valid node that
// * was reachable upon iterator construction. It might miss some nodes
// * that were added to a bin after the bin was visited, which is OK
// * with respect to the consistency guarantees. Maintaining this
// * property in the face of possible ongoing resizes requires a fair
// * amount of bookkeeping state that is difficult to optimize away
// * amidst volatile accesses. Even so, traversal maintains reasonable
// * throughput.
// *
// * Normally, iteration proceeds bin-by-bin traversing lists.
// * However, if the table has been resized, then all future steps
// * must traverse both the bin at the current index as well as at
// * (index + baseSize); and so on for further resizings. To cope
// * defensively with potential sharing of iterators across threads,
// * iteration terminates if a bounds check fails for a table read.
// */
// static class Traverser!(K, V) {
//     Node!(K, V)[] tab;              // current table; updated if resized
//     Node!(K, V) next;               // the next entry to use
//     TableStack!(K, V) stack, spare; // to save/restore on ForwardingNodes
//     int index;                      // index of bin to use next
//     int baseIndex;                  // current index of initial table
//     int baseLimit;                  // index bound for initial table
//     final int baseSize;             // initial table size

//     Traverser(Node!(K, V)[] tab, int size, int index, int limit) {
//         this.tab = tab;
//         this.baseSize = size;
//         this.baseIndex = this.index = index;
//         this.baseLimit = limit;
//         this.next = null;
//     }

//     /**
//      * Advances if possible, returning the next valid node, or null if none.
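//      * <p>A minimal consumption loop (a sketch; {@code process} is a
//      * hypothetical callback, not part of this class):
//      * <pre> {@code
//      * Traverser<K,V> it = new Traverser<K,V>(tab, tab.length, 0, tab.length);
//      * for (Node<K,V> p; (p = it.advance()) != null; )
//      *     process(p.val);
//      * }</pre>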
3316 // */ 3317 // final Node!(K, V) advance() { 3318 // Node!(K, V) e; 3319 // if ((e = next) != null) 3320 // e = e.next; 3321 // for (;;) { 3322 // Node!(K, V)[] t; int i, n; // must use locals in checks 3323 // if (e != null) 3324 // return next = e; 3325 // if (baseIndex >= baseLimit || (t = tab) == null || 3326 // (n = t.length) <= (i = index) || i < 0) 3327 // return next = null; 3328 // if ((e = tabAt(t, i)) != null && e.hash < 0) { 3329 // if (e instanceof ForwardingNode) { 3330 // tab = ((ForwardingNode!(K, V))e).nextTable; 3331 // e = null; 3332 // pushState(t, i, n); 3333 // continue; 3334 // } 3335 // else if (e instanceof TreeBin) 3336 // e = ((TreeBin!(K, V))e).first; 3337 // else 3338 // e = null; 3339 // } 3340 // if (stack != null) 3341 // recoverState(n); 3342 // else if ((index = i + baseSize) >= n) 3343 // index = ++baseIndex; // visit upper slots if present 3344 // } 3345 // } 3346 3347 // /** 3348 // * Saves traversal state upon encountering a forwarding node. 3349 // */ 3350 // private void pushState(Node!(K, V)[] t, int i, int n) { 3351 // TableStack!(K, V) s = spare; // reuse if possible 3352 // if (s != null) 3353 // spare = s.next; 3354 // else 3355 // s = new TableStack!(K, V)(); 3356 // s.tab = t; 3357 // s.length = n; 3358 // s.index = i; 3359 // s.next = stack; 3360 // stack = s; 3361 // } 3362 3363 // /** 3364 // * Possibly pops traversal state. 3365 // * 3366 // * @param n length of current table 3367 // */ 3368 // private void recoverState(int n) { 3369 // TableStack!(K, V) s; int len; 3370 // while ((s = stack) != null && (index += (len = s.length)) >= n) { 3371 // n = len; 3372 // index = s.index; 3373 // tab = s.tab; 3374 // s.tab = null; 3375 // TableStack!(K, V) next = s.next; 3376 // s.next = spare; // save for reuse 3377 // stack = next; 3378 // spare = s; 3379 // } 3380 // if (s == null && (index += baseSize) >= n) 3381 // index = ++baseIndex; 3382 // } 3383 // } 3384 3385 // /** 3386 // * Base of key, value, and entry Iterators. Adds fields to 3387 // * Traverser to support iterator.remove. 
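// * <p>Removal is write-through to the backing map. A sketch of the
// * resulting pattern ({@code shouldDrop} is a hypothetical predicate):
// * <pre> {@code
// * Iterator<K> it = map.keySet().iterator();
// * while (it.hasNext()) {
// *     K k = it.next();
// *     if (shouldDrop(k))
// *         it.remove(); // delegates to map.replaceNode(k, null, null)
// * }
// * }</pre>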
3388 // */ 3389 // static class BaseIterator!(K, V) extends Traverser!(K, V) { 3390 // final ConcurrentHashMap!(K, V) map; 3391 // Node!(K, V) lastReturned; 3392 // BaseIterator(Node!(K, V)[] tab, int size, int index, int limit, 3393 // ConcurrentHashMap!(K, V) map) { 3394 // super(tab, size, index, limit); 3395 // this.map = map; 3396 // advance(); 3397 // } 3398 3399 // public final boolean hasNext() { return next != null; } 3400 // public final boolean hasMoreElements() { return next != null; } 3401 3402 // public final void remove() { 3403 // Node!(K, V) p; 3404 // if ((p = lastReturned) == null) 3405 // throw new IllegalStateException(); 3406 // lastReturned = null; 3407 // map.replaceNode(p.key, null, null); 3408 // } 3409 // } 3410 3411 // static final class KeyIterator!(K, V) extends BaseIterator!(K, V) 3412 // implements Iterator<K>, Enumeration<K> { 3413 // KeyIterator(Node!(K, V)[] tab, int size, int index, int limit, 3414 // ConcurrentHashMap!(K, V) map) { 3415 // super(tab, size, index, limit, map); 3416 // } 3417 3418 // public final K next() { 3419 // Node!(K, V) p; 3420 // if ((p = next) == null) 3421 // throw new NoSuchElementException(); 3422 // K k = p.key; 3423 // lastReturned = p; 3424 // advance(); 3425 // return k; 3426 // } 3427 3428 // public final K nextElement() { return next(); } 3429 // } 3430 3431 // static final class ValueIterator!(K, V) extends BaseIterator!(K, V) 3432 // implements Iterator<V>, Enumeration<V> { 3433 // ValueIterator(Node!(K, V)[] tab, int size, int index, int limit, 3434 // ConcurrentHashMap!(K, V) map) { 3435 // super(tab, size, index, limit, map); 3436 // } 3437 3438 // public final V next() { 3439 // Node!(K, V) p; 3440 // if ((p = next) == null) 3441 // throw new NoSuchElementException(); 3442 // V v = p.val; 3443 // lastReturned = p; 3444 // advance(); 3445 // return v; 3446 // } 3447 3448 // public final V nextElement() { return next(); } 3449 // } 3450 3451 // static final class EntryIterator!(K, V) extends BaseIterator!(K, V) 3452 // implements Iterator<Map.Entry!(K, V)> { 3453 // EntryIterator(Node!(K, V)[] tab, int size, int index, int limit, 3454 // ConcurrentHashMap!(K, V) map) { 3455 // super(tab, size, index, limit, map); 3456 // } 3457 3458 // public final Map.Entry!(K, V) next() { 3459 // Node!(K, V) p; 3460 // if ((p = next) == null) 3461 // throw new NoSuchElementException(); 3462 // K k = p.key; 3463 // V v = p.val; 3464 // lastReturned = p; 3465 // advance(); 3466 // return new MapEntry!(K, V)(k, v, map); 3467 // } 3468 // } 3469 3470 // /** 3471 // * Exported Entry for EntryIterator. 
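// * Entries are write-through: {@code setValue} updates this entry and
// * then puts the new value into the backing map, so the "previous"
// * value it returns may already be stale under concurrent updates.
// * A sketch ({@code freshen} is a hypothetical function):
// * <pre> {@code
// * for (Map.Entry<K,V> e : map.entrySet())
// *     e.setValue(freshen(e.getValue()));
// * }</pre>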
3472 // */ 3473 // static final class MapEntry!(K, V) implements Map.Entry!(K, V) { 3474 // final K key; // non-null 3475 // V val; // non-null 3476 // final ConcurrentHashMap!(K, V) map; 3477 // MapEntry(K key, V val, ConcurrentHashMap!(K, V) map) { 3478 // this.key = key; 3479 // this.val = val; 3480 // this.map = map; 3481 // } 3482 // public K getKey() { return key; } 3483 // public V getValue() { return val; } 3484 // public int hashCode() { return key.hashCode() ^ val.hashCode(); } 3485 // public String toString() { 3486 // return Helpers.mapEntryToString(key, val); 3487 // } 3488 3489 // public boolean equals(Object o) { 3490 // Object k, v; Map.Entry<?,?> e; 3491 // return ((o instanceof Map.Entry) && 3492 // (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 3493 // (v = e.getValue()) != null && 3494 // (k == key || k.equals(key)) && 3495 // (v == val || v.equals(val))); 3496 // } 3497 3498 // /** 3499 // * Sets our entry's value and writes through to the map. The 3500 // * value to return is somewhat arbitrary here. Since we do not 3501 // * necessarily track asynchronous changes, the most recent 3502 // * "previous" value could be different from what we return (or 3503 // * could even have been removed, in which case the put will 3504 // * re-establish). We do not and cannot guarantee more. 3505 // */ 3506 // public V setValue(V value) { 3507 // if (value == null) throw new NullPointerException(); 3508 // V v = val; 3509 // val = value; 3510 // map.put(key, value); 3511 // return v; 3512 // } 3513 // } 3514 3515 // static final class KeySpliterator!(K, V) extends Traverser!(K, V) 3516 // implements Spliterator<K> { 3517 // long est; // size estimate 3518 // KeySpliterator(Node!(K, V)[] tab, int size, int index, int limit, 3519 // long est) { 3520 // super(tab, size, index, limit); 3521 // this.est = est; 3522 // } 3523 3524 // public KeySpliterator!(K, V) trySplit() { 3525 // int i, f, h; 3526 // return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3527 // new KeySpliterator!(K, V)(tab, baseSize, baseLimit = h, 3528 // f, est >>>= 1); 3529 // } 3530 3531 // public void forEachRemaining(Consumer<? super K> action) { 3532 // if (action == null) throw new NullPointerException(); 3533 // for (Node!(K, V) p; (p = advance()) != null;) 3534 // action.accept(p.key); 3535 // } 3536 3537 // public boolean tryAdvance(Consumer<? super K> action) { 3538 // if (action == null) throw new NullPointerException(); 3539 // Node!(K, V) p; 3540 // if ((p = advance()) == null) 3541 // return false; 3542 // action.accept(p.key); 3543 // return true; 3544 // } 3545 3546 // public long estimateSize() { return est; } 3547 3548 // public int characteristics() { 3549 // return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3550 // Spliterator.NONNULL; 3551 // } 3552 // } 3553 3554 // static final class ValueSpliterator!(K, V) extends Traverser!(K, V) 3555 // implements Spliterator<V> { 3556 // long est; // size estimate 3557 // ValueSpliterator(Node!(K, V)[] tab, int size, int index, int limit, 3558 // long est) { 3559 // super(tab, size, index, limit); 3560 // this.est = est; 3561 // } 3562 3563 // public ValueSpliterator!(K, V) trySplit() { 3564 // int i, f, h; 3565 // return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3566 // new ValueSpliterator!(K, V)(tab, baseSize, baseLimit = h, 3567 // f, est >>>= 1); 3568 // } 3569 3570 // public void forEachRemaining(Consumer<? 
super V> action) { 3571 // if (action == null) throw new NullPointerException(); 3572 // for (Node!(K, V) p; (p = advance()) != null;) 3573 // action.accept(p.val); 3574 // } 3575 3576 // public boolean tryAdvance(Consumer<? super V> action) { 3577 // if (action == null) throw new NullPointerException(); 3578 // Node!(K, V) p; 3579 // if ((p = advance()) == null) 3580 // return false; 3581 // action.accept(p.val); 3582 // return true; 3583 // } 3584 3585 // public long estimateSize() { return est; } 3586 3587 // public int characteristics() { 3588 // return Spliterator.CONCURRENT | Spliterator.NONNULL; 3589 // } 3590 // } 3591 3592 // static final class EntrySpliterator!(K, V) extends Traverser!(K, V) 3593 // implements Spliterator<Map.Entry!(K, V)> { 3594 // final ConcurrentHashMap!(K, V) map; // To export MapEntry 3595 // long est; // size estimate 3596 // EntrySpliterator(Node!(K, V)[] tab, int size, int index, int limit, 3597 // long est, ConcurrentHashMap!(K, V) map) { 3598 // super(tab, size, index, limit); 3599 // this.map = map; 3600 // this.est = est; 3601 // } 3602 3603 // public EntrySpliterator!(K, V) trySplit() { 3604 // int i, f, h; 3605 // return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3606 // new EntrySpliterator!(K, V)(tab, baseSize, baseLimit = h, 3607 // f, est >>>= 1, map); 3608 // } 3609 3610 // public void forEachRemaining(Consumer<? super Map.Entry!(K, V)> action) { 3611 // if (action == null) throw new NullPointerException(); 3612 // for (Node!(K, V) p; (p = advance()) != null; ) 3613 // action.accept(new MapEntry!(K, V)(p.key, p.val, map)); 3614 // } 3615 3616 // public boolean tryAdvance(Consumer<? super Map.Entry!(K, V)> action) { 3617 // if (action == null) throw new NullPointerException(); 3618 // Node!(K, V) p; 3619 // if ((p = advance()) == null) 3620 // return false; 3621 // action.accept(new MapEntry!(K, V)(p.key, p.val, map)); 3622 // return true; 3623 // } 3624 3625 // public long estimateSize() { return est; } 3626 3627 // public int characteristics() { 3628 // return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3629 // Spliterator.NONNULL; 3630 // } 3631 // } 3632 3633 // // Parallel bulk operations 3634 3635 // /** 3636 // * Computes initial batch value for bulk tasks. The returned value 3637 // * is approximately exp2 of the number of times (minus one) to 3638 // * split task by two before executing leaf action. This value is 3639 // * faster to compute and more convenient to use as a guide to 3640 // * splitting than is the depth, since it is used while dividing by 3641 // * two anyway. 3642 // */ 3643 // final int batchFor(long b) { 3644 // long n; 3645 // if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) 3646 // return 0; 3647 // int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 3648 // return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; 3649 // } 3650 3651 // /** 3652 // * Performs the given action for each (key, value). 3653 // * 3654 // * @param parallelismThreshold the (estimated) number of elements 3655 // * needed for this operation to be executed in parallel 3656 // * @param action the action 3657 // */ 3658 // public void forEach(long parallelismThreshold, 3659 // BiConsumer<? super K,? 
super V> action) { 3660 // if (action == null) throw new NullPointerException(); 3661 // new ForEachMappingTask!(K, V) 3662 // (null, batchFor(parallelismThreshold), 0, 0, table, 3663 // action).invoke(); 3664 // } 3665 3666 // /** 3667 // * Performs the given action for each non-null transformation 3668 // * of each (key, value). 3669 // * 3670 // * @param parallelismThreshold the (estimated) number of elements 3671 // * needed for this operation to be executed in parallel 3672 // * @param transformer a function returning the transformation 3673 // * for an element, or null if there is no transformation (in 3674 // * which case the action is not applied) 3675 // * @param action the action 3676 // * @param <U> the return type of the transformer 3677 // */ 3678 // public <U> void forEach(long parallelismThreshold, 3679 // BiFunction<? super K, ? super V, ? extends U> transformer, 3680 // Consumer<? super U> action) { 3681 // if (transformer == null || action == null) 3682 // throw new NullPointerException(); 3683 // new ForEachTransformedMappingTask<K,V,U> 3684 // (null, batchFor(parallelismThreshold), 0, 0, table, 3685 // transformer, action).invoke(); 3686 // } 3687 3688 // /** 3689 // * Returns a non-null result from applying the given search 3690 // * function on each (key, value), or null if none. Upon 3691 // * success, further element processing is suppressed and the 3692 // * results of any other parallel invocations of the search 3693 // * function are ignored. 3694 // * 3695 // * @param parallelismThreshold the (estimated) number of elements 3696 // * needed for this operation to be executed in parallel 3697 // * @param searchFunction a function returning a non-null 3698 // * result on success, else null 3699 // * @param <U> the return type of the search function 3700 // * @return a non-null result from applying the given search 3701 // * function on each (key, value), or null if none 3702 // */ 3703 // public <U> U search(long parallelismThreshold, 3704 // BiFunction<? super K, ? super V, ? extends U> searchFunction) { 3705 // if (searchFunction == null) throw new NullPointerException(); 3706 // return new SearchMappingsTask<K,V,U> 3707 // (null, batchFor(parallelismThreshold), 0, 0, table, 3708 // searchFunction, new AtomicReference<U>()).invoke(); 3709 // } 3710 3711 // /** 3712 // * Returns the result of accumulating the given transformation 3713 // * of all (key, value) pairs using the given reducer to 3714 // * combine values, or null if none. 3715 // * 3716 // * @param parallelismThreshold the (estimated) number of elements 3717 // * needed for this operation to be executed in parallel 3718 // * @param transformer a function returning the transformation 3719 // * for an element, or null if there is no transformation (in 3720 // * which case it is not combined) 3721 // * @param reducer a commutative associative combining function 3722 // * @param <U> the return type of the transformer 3723 // * @return the result of accumulating the given transformation 3724 // * of all (key, value) pairs 3725 // */ 3726 // public <U> U reduce(long parallelismThreshold, 3727 // BiFunction<? super K, ? super V, ? extends U> transformer, 3728 // BiFunction<? super U, ? super U, ? 
extends U> reducer) { 3729 // if (transformer == null || reducer == null) 3730 // throw new NullPointerException(); 3731 // return new MapReduceMappingsTask<K,V,U> 3732 // (null, batchFor(parallelismThreshold), 0, 0, table, 3733 // null, transformer, reducer).invoke(); 3734 // } 3735 3736 // /** 3737 // * Returns the result of accumulating the given transformation 3738 // * of all (key, value) pairs using the given reducer to 3739 // * combine values, and the given basis as an identity value. 3740 // * 3741 // * @param parallelismThreshold the (estimated) number of elements 3742 // * needed for this operation to be executed in parallel 3743 // * @param transformer a function returning the transformation 3744 // * for an element 3745 // * @param basis the identity (initial default value) for the reduction 3746 // * @param reducer a commutative associative combining function 3747 // * @return the result of accumulating the given transformation 3748 // * of all (key, value) pairs 3749 // */ 3750 // public double reduceToDouble(long parallelismThreshold, 3751 // ToDoubleBiFunction<? super K, ? super V> transformer, 3752 // double basis, 3753 // DoubleBinaryOperator reducer) { 3754 // if (transformer == null || reducer == null) 3755 // throw new NullPointerException(); 3756 // return new MapReduceMappingsToDoubleTask!(K, V) 3757 // (null, batchFor(parallelismThreshold), 0, 0, table, 3758 // null, transformer, basis, reducer).invoke(); 3759 // } 3760 3761 // /** 3762 // * Returns the result of accumulating the given transformation 3763 // * of all (key, value) pairs using the given reducer to 3764 // * combine values, and the given basis as an identity value. 3765 // * 3766 // * @param parallelismThreshold the (estimated) number of elements 3767 // * needed for this operation to be executed in parallel 3768 // * @param transformer a function returning the transformation 3769 // * for an element 3770 // * @param basis the identity (initial default value) for the reduction 3771 // * @param reducer a commutative associative combining function 3772 // * @return the result of accumulating the given transformation 3773 // * of all (key, value) pairs 3774 // */ 3775 // public long reduceToLong(long parallelismThreshold, 3776 // ToLongBiFunction<? super K, ? super V> transformer, 3777 // long basis, 3778 // LongBinaryOperator reducer) { 3779 // if (transformer == null || reducer == null) 3780 // throw new NullPointerException(); 3781 // return new MapReduceMappingsToLongTask!(K, V) 3782 // (null, batchFor(parallelismThreshold), 0, 0, table, 3783 // null, transformer, basis, reducer).invoke(); 3784 // } 3785 3786 // /** 3787 // * Returns the result of accumulating the given transformation 3788 // * of all (key, value) pairs using the given reducer to 3789 // * combine values, and the given basis as an identity value. 3790 // * 3791 // * @param parallelismThreshold the (estimated) number of elements 3792 // * needed for this operation to be executed in parallel 3793 // * @param transformer a function returning the transformation 3794 // * for an element 3795 // * @param basis the identity (initial default value) for the reduction 3796 // * @param reducer a commutative associative combining function 3797 // * @return the result of accumulating the given transformation 3798 // * of all (key, value) pairs 3799 // */ 3800 // public int reduceToInt(long parallelismThreshold, 3801 // ToIntBiFunction<? super K, ? 
super V> transformer, 3802 // int basis, 3803 // IntBinaryOperator reducer) { 3804 // if (transformer == null || reducer == null) 3805 // throw new NullPointerException(); 3806 // return new MapReduceMappingsToIntTask!(K, V) 3807 // (null, batchFor(parallelismThreshold), 0, 0, table, 3808 // null, transformer, basis, reducer).invoke(); 3809 // } 3810 3811 // /** 3812 // * Performs the given action for each key. 3813 // * 3814 // * @param parallelismThreshold the (estimated) number of elements 3815 // * needed for this operation to be executed in parallel 3816 // * @param action the action 3817 // */ 3818 // public void forEachKey(long parallelismThreshold, 3819 // Consumer<? super K> action) { 3820 // if (action == null) throw new NullPointerException(); 3821 // new ForEachKeyTask!(K, V) 3822 // (null, batchFor(parallelismThreshold), 0, 0, table, 3823 // action).invoke(); 3824 // } 3825 3826 // /** 3827 // * Performs the given action for each non-null transformation 3828 // * of each key. 3829 // * 3830 // * @param parallelismThreshold the (estimated) number of elements 3831 // * needed for this operation to be executed in parallel 3832 // * @param transformer a function returning the transformation 3833 // * for an element, or null if there is no transformation (in 3834 // * which case the action is not applied) 3835 // * @param action the action 3836 // * @param <U> the return type of the transformer 3837 // */ 3838 // public <U> void forEachKey(long parallelismThreshold, 3839 // Function<? super K, ? extends U> transformer, 3840 // Consumer<? super U> action) { 3841 // if (transformer == null || action == null) 3842 // throw new NullPointerException(); 3843 // new ForEachTransformedKeyTask<K,V,U> 3844 // (null, batchFor(parallelismThreshold), 0, 0, table, 3845 // transformer, action).invoke(); 3846 // } 3847 3848 // /** 3849 // * Returns a non-null result from applying the given search 3850 // * function on each key, or null if none. Upon success, 3851 // * further element processing is suppressed and the results of 3852 // * any other parallel invocations of the search function are 3853 // * ignored. 3854 // * 3855 // * @param parallelismThreshold the (estimated) number of elements 3856 // * needed for this operation to be executed in parallel 3857 // * @param searchFunction a function returning a non-null 3858 // * result on success, else null 3859 // * @param <U> the return type of the search function 3860 // * @return a non-null result from applying the given search 3861 // * function on each key, or null if none 3862 // */ 3863 // public <U> U searchKeys(long parallelismThreshold, 3864 // Function<? super K, ? extends U> searchFunction) { 3865 // if (searchFunction == null) throw new NullPointerException(); 3866 // return new SearchKeysTask<K,V,U> 3867 // (null, batchFor(parallelismThreshold), 0, 0, table, 3868 // searchFunction, new AtomicReference<U>()).invoke(); 3869 // } 3870 3871 // /** 3872 // * Returns the result of accumulating all keys using the given 3873 // * reducer to combine values, or null if none. 3874 // * 3875 // * @param parallelismThreshold the (estimated) number of elements 3876 // * needed for this operation to be executed in parallel 3877 // * @param reducer a commutative associative combining function 3878 // * @return the result of accumulating all keys using the given 3879 // * reducer to combine values, or null if none 3880 // */ 3881 // public K reduceKeys(long parallelismThreshold, 3882 // BiFunction<? super K, ? super K, ? 
extends K> reducer) { 3883 // if (reducer == null) throw new NullPointerException(); 3884 // return new ReduceKeysTask!(K, V) 3885 // (null, batchFor(parallelismThreshold), 0, 0, table, 3886 // null, reducer).invoke(); 3887 // } 3888 3889 // /** 3890 // * Returns the result of accumulating the given transformation 3891 // * of all keys using the given reducer to combine values, or 3892 // * null if none. 3893 // * 3894 // * @param parallelismThreshold the (estimated) number of elements 3895 // * needed for this operation to be executed in parallel 3896 // * @param transformer a function returning the transformation 3897 // * for an element, or null if there is no transformation (in 3898 // * which case it is not combined) 3899 // * @param reducer a commutative associative combining function 3900 // * @param <U> the return type of the transformer 3901 // * @return the result of accumulating the given transformation 3902 // * of all keys 3903 // */ 3904 // public <U> U reduceKeys(long parallelismThreshold, 3905 // Function<? super K, ? extends U> transformer, 3906 // BiFunction<? super U, ? super U, ? extends U> reducer) { 3907 // if (transformer == null || reducer == null) 3908 // throw new NullPointerException(); 3909 // return new MapReduceKeysTask<K,V,U> 3910 // (null, batchFor(parallelismThreshold), 0, 0, table, 3911 // null, transformer, reducer).invoke(); 3912 // } 3913 3914 // /** 3915 // * Returns the result of accumulating the given transformation 3916 // * of all keys using the given reducer to combine values, and 3917 // * the given basis as an identity value. 3918 // * 3919 // * @param parallelismThreshold the (estimated) number of elements 3920 // * needed for this operation to be executed in parallel 3921 // * @param transformer a function returning the transformation 3922 // * for an element 3923 // * @param basis the identity (initial default value) for the reduction 3924 // * @param reducer a commutative associative combining function 3925 // * @return the result of accumulating the given transformation 3926 // * of all keys 3927 // */ 3928 // public double reduceKeysToDouble(long parallelismThreshold, 3929 // ToDoubleFunction<? super K> transformer, 3930 // double basis, 3931 // DoubleBinaryOperator reducer) { 3932 // if (transformer == null || reducer == null) 3933 // throw new NullPointerException(); 3934 // return new MapReduceKeysToDoubleTask!(K, V) 3935 // (null, batchFor(parallelismThreshold), 0, 0, table, 3936 // null, transformer, basis, reducer).invoke(); 3937 // } 3938 3939 // /** 3940 // * Returns the result of accumulating the given transformation 3941 // * of all keys using the given reducer to combine values, and 3942 // * the given basis as an identity value. 3943 // * 3944 // * @param parallelismThreshold the (estimated) number of elements 3945 // * needed for this operation to be executed in parallel 3946 // * @param transformer a function returning the transformation 3947 // * for an element 3948 // * @param basis the identity (initial default value) for the reduction 3949 // * @param reducer a commutative associative combining function 3950 // * @return the result of accumulating the given transformation 3951 // * of all keys 3952 // */ 3953 // public long reduceKeysToLong(long parallelismThreshold, 3954 // ToLongFunction<? 
super K> transformer, 3955 // long basis, 3956 // LongBinaryOperator reducer) { 3957 // if (transformer == null || reducer == null) 3958 // throw new NullPointerException(); 3959 // return new MapReduceKeysToLongTask!(K, V) 3960 // (null, batchFor(parallelismThreshold), 0, 0, table, 3961 // null, transformer, basis, reducer).invoke(); 3962 // } 3963 3964 // /** 3965 // * Returns the result of accumulating the given transformation 3966 // * of all keys using the given reducer to combine values, and 3967 // * the given basis as an identity value. 3968 // * 3969 // * @param parallelismThreshold the (estimated) number of elements 3970 // * needed for this operation to be executed in parallel 3971 // * @param transformer a function returning the transformation 3972 // * for an element 3973 // * @param basis the identity (initial default value) for the reduction 3974 // * @param reducer a commutative associative combining function 3975 // * @return the result of accumulating the given transformation 3976 // * of all keys 3977 // */ 3978 // public int reduceKeysToInt(long parallelismThreshold, 3979 // ToIntFunction<? super K> transformer, 3980 // int basis, 3981 // IntBinaryOperator reducer) { 3982 // if (transformer == null || reducer == null) 3983 // throw new NullPointerException(); 3984 // return new MapReduceKeysToIntTask!(K, V) 3985 // (null, batchFor(parallelismThreshold), 0, 0, table, 3986 // null, transformer, basis, reducer).invoke(); 3987 // } 3988 3989 // /** 3990 // * Performs the given action for each value. 3991 // * 3992 // * @param parallelismThreshold the (estimated) number of elements 3993 // * needed for this operation to be executed in parallel 3994 // * @param action the action 3995 // */ 3996 // public void forEachValue(long parallelismThreshold, 3997 // Consumer<? super V> action) { 3998 // if (action == null) 3999 // throw new NullPointerException(); 4000 // new ForEachValueTask!(K, V) 4001 // (null, batchFor(parallelismThreshold), 0, 0, table, 4002 // action).invoke(); 4003 // } 4004 4005 // /** 4006 // * Performs the given action for each non-null transformation 4007 // * of each value. 4008 // * 4009 // * @param parallelismThreshold the (estimated) number of elements 4010 // * needed for this operation to be executed in parallel 4011 // * @param transformer a function returning the transformation 4012 // * for an element, or null if there is no transformation (in 4013 // * which case the action is not applied) 4014 // * @param action the action 4015 // * @param <U> the return type of the transformer 4016 // */ 4017 // public <U> void forEachValue(long parallelismThreshold, 4018 // Function<? super V, ? extends U> transformer, 4019 // Consumer<? super U> action) { 4020 // if (transformer == null || action == null) 4021 // throw new NullPointerException(); 4022 // new ForEachTransformedValueTask<K,V,U> 4023 // (null, batchFor(parallelismThreshold), 0, 0, table, 4024 // transformer, action).invoke(); 4025 // } 4026 4027 // /** 4028 // * Returns a non-null result from applying the given search 4029 // * function on each value, or null if none. Upon success, 4030 // * further element processing is suppressed and the results of 4031 // * any other parallel invocations of the search function are 4032 // * ignored. 
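// * <p>Example (a sketch assuming {@code String} values): find any
// * oversized value, letting other parallel searches stop on success:
// * <pre> {@code
// * String hit = map.searchValues(100L,
// *     v -> v.length() > 1024 ? v : null); // null means keep searching
// * }</pre>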
4033 // * 4034 // * @param parallelismThreshold the (estimated) number of elements 4035 // * needed for this operation to be executed in parallel 4036 // * @param searchFunction a function returning a non-null 4037 // * result on success, else null 4038 // * @param <U> the return type of the search function 4039 // * @return a non-null result from applying the given search 4040 // * function on each value, or null if none 4041 // */ 4042 // public <U> U searchValues(long parallelismThreshold, 4043 // Function<? super V, ? extends U> searchFunction) { 4044 // if (searchFunction == null) throw new NullPointerException(); 4045 // return new SearchValuesTask<K,V,U> 4046 // (null, batchFor(parallelismThreshold), 0, 0, table, 4047 // searchFunction, new AtomicReference<U>()).invoke(); 4048 // } 4049 4050 // /** 4051 // * Returns the result of accumulating all values using the 4052 // * given reducer to combine values, or null if none. 4053 // * 4054 // * @param parallelismThreshold the (estimated) number of elements 4055 // * needed for this operation to be executed in parallel 4056 // * @param reducer a commutative associative combining function 4057 // * @return the result of accumulating all values 4058 // */ 4059 // public V reduceValues(long parallelismThreshold, 4060 // BiFunction<? super V, ? super V, ? extends V> reducer) { 4061 // if (reducer == null) throw new NullPointerException(); 4062 // return new ReduceValuesTask!(K, V) 4063 // (null, batchFor(parallelismThreshold), 0, 0, table, 4064 // null, reducer).invoke(); 4065 // } 4066 4067 // /** 4068 // * Returns the result of accumulating the given transformation 4069 // * of all values using the given reducer to combine values, or 4070 // * null if none. 4071 // * 4072 // * @param parallelismThreshold the (estimated) number of elements 4073 // * needed for this operation to be executed in parallel 4074 // * @param transformer a function returning the transformation 4075 // * for an element, or null if there is no transformation (in 4076 // * which case it is not combined) 4077 // * @param reducer a commutative associative combining function 4078 // * @param <U> the return type of the transformer 4079 // * @return the result of accumulating the given transformation 4080 // * of all values 4081 // */ 4082 // public <U> U reduceValues(long parallelismThreshold, 4083 // Function<? super V, ? extends U> transformer, 4084 // BiFunction<? super U, ? super U, ? extends U> reducer) { 4085 // if (transformer == null || reducer == null) 4086 // throw new NullPointerException(); 4087 // return new MapReduceValuesTask<K,V,U> 4088 // (null, batchFor(parallelismThreshold), 0, 0, table, 4089 // null, transformer, reducer).invoke(); 4090 // } 4091 4092 // /** 4093 // * Returns the result of accumulating the given transformation 4094 // * of all values using the given reducer to combine values, 4095 // * and the given basis as an identity value. 4096 // * 4097 // * @param parallelismThreshold the (estimated) number of elements 4098 // * needed for this operation to be executed in parallel 4099 // * @param transformer a function returning the transformation 4100 // * for an element 4101 // * @param basis the identity (initial default value) for the reduction 4102 // * @param reducer a commutative associative combining function 4103 // * @return the result of accumulating the given transformation 4104 // * of all values 4105 // */ 4106 // public double reduceValuesToDouble(long parallelismThreshold, 4107 // ToDoubleFunction<? 
super V> transformer, 4108 // double basis, 4109 // DoubleBinaryOperator reducer) { 4110 // if (transformer == null || reducer == null) 4111 // throw new NullPointerException(); 4112 // return new MapReduceValuesToDoubleTask!(K, V) 4113 // (null, batchFor(parallelismThreshold), 0, 0, table, 4114 // null, transformer, basis, reducer).invoke(); 4115 // } 4116 4117 // /** 4118 // * Returns the result of accumulating the given transformation 4119 // * of all values using the given reducer to combine values, 4120 // * and the given basis as an identity value. 4121 // * 4122 // * @param parallelismThreshold the (estimated) number of elements 4123 // * needed for this operation to be executed in parallel 4124 // * @param transformer a function returning the transformation 4125 // * for an element 4126 // * @param basis the identity (initial default value) for the reduction 4127 // * @param reducer a commutative associative combining function 4128 // * @return the result of accumulating the given transformation 4129 // * of all values 4130 // */ 4131 // public long reduceValuesToLong(long parallelismThreshold, 4132 // ToLongFunction<? super V> transformer, 4133 // long basis, 4134 // LongBinaryOperator reducer) { 4135 // if (transformer == null || reducer == null) 4136 // throw new NullPointerException(); 4137 // return new MapReduceValuesToLongTask!(K, V) 4138 // (null, batchFor(parallelismThreshold), 0, 0, table, 4139 // null, transformer, basis, reducer).invoke(); 4140 // } 4141 4142 // /** 4143 // * Returns the result of accumulating the given transformation 4144 // * of all values using the given reducer to combine values, 4145 // * and the given basis as an identity value. 4146 // * 4147 // * @param parallelismThreshold the (estimated) number of elements 4148 // * needed for this operation to be executed in parallel 4149 // * @param transformer a function returning the transformation 4150 // * for an element 4151 // * @param basis the identity (initial default value) for the reduction 4152 // * @param reducer a commutative associative combining function 4153 // * @return the result of accumulating the given transformation 4154 // * of all values 4155 // */ 4156 // public int reduceValuesToInt(long parallelismThreshold, 4157 // ToIntFunction<? super V> transformer, 4158 // int basis, 4159 // IntBinaryOperator reducer) { 4160 // if (transformer == null || reducer == null) 4161 // throw new NullPointerException(); 4162 // return new MapReduceValuesToIntTask!(K, V) 4163 // (null, batchFor(parallelismThreshold), 0, 0, table, 4164 // null, transformer, basis, reducer).invoke(); 4165 // } 4166 4167 // /** 4168 // * Performs the given action for each entry. 4169 // * 4170 // * @param parallelismThreshold the (estimated) number of elements 4171 // * needed for this operation to be executed in parallel 4172 // * @param action the action 4173 // */ 4174 // public void forEachEntry(long parallelismThreshold, 4175 // Consumer<? super Map.Entry!(K, V)> action) { 4176 // if (action == null) throw new NullPointerException(); 4177 // new ForEachEntryTask!(K, V)(null, batchFor(parallelismThreshold), 0, 0, table, 4178 // action).invoke(); 4179 // } 4180 4181 // /** 4182 // * Performs the given action for each non-null transformation 4183 // * of each entry. 
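// * <p>Example (a sketch assuming {@code String} keys and values):
// * print the keys of entries whose values are empty, filtering by
// * returning null from the transformer:
// * <pre> {@code
// * map.forEachEntry(100L,
// *     e -> e.getValue().isEmpty() ? e.getKey() : null,
// *     k -> System.out.println(k));
// * }</pre>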
4184 // * 4185 // * @param parallelismThreshold the (estimated) number of elements 4186 // * needed for this operation to be executed in parallel 4187 // * @param transformer a function returning the transformation 4188 // * for an element, or null if there is no transformation (in 4189 // * which case the action is not applied) 4190 // * @param action the action 4191 // * @param <U> the return type of the transformer 4192 // */ 4193 // public <U> void forEachEntry(long parallelismThreshold, 4194 // Function<Map.Entry!(K, V), ? extends U> transformer, 4195 // Consumer<? super U> action) { 4196 // if (transformer == null || action == null) 4197 // throw new NullPointerException(); 4198 // new ForEachTransformedEntryTask<K,V,U> 4199 // (null, batchFor(parallelismThreshold), 0, 0, table, 4200 // transformer, action).invoke(); 4201 // } 4202 4203 // /** 4204 // * Returns a non-null result from applying the given search 4205 // * function on each entry, or null if none. Upon success, 4206 // * further element processing is suppressed and the results of 4207 // * any other parallel invocations of the search function are 4208 // * ignored. 4209 // * 4210 // * @param parallelismThreshold the (estimated) number of elements 4211 // * needed for this operation to be executed in parallel 4212 // * @param searchFunction a function returning a non-null 4213 // * result on success, else null 4214 // * @param <U> the return type of the search function 4215 // * @return a non-null result from applying the given search 4216 // * function on each entry, or null if none 4217 // */ 4218 // public <U> U searchEntries(long parallelismThreshold, 4219 // Function<Map.Entry!(K, V), ? extends U> searchFunction) { 4220 // if (searchFunction == null) throw new NullPointerException(); 4221 // return new SearchEntriesTask<K,V,U> 4222 // (null, batchFor(parallelismThreshold), 0, 0, table, 4223 // searchFunction, new AtomicReference<U>()).invoke(); 4224 // } 4225 4226 // /** 4227 // * Returns the result of accumulating all entries using the 4228 // * given reducer to combine values, or null if none. 4229 // * 4230 // * @param parallelismThreshold the (estimated) number of elements 4231 // * needed for this operation to be executed in parallel 4232 // * @param reducer a commutative associative combining function 4233 // * @return the result of accumulating all entries 4234 // */ 4235 // public Map.Entry!(K, V) reduceEntries(long parallelismThreshold, 4236 // BiFunction<Map.Entry!(K, V), Map.Entry!(K, V), ? extends Map.Entry!(K, V)> reducer) { 4237 // if (reducer == null) throw new NullPointerException(); 4238 // return new ReduceEntriesTask!(K, V) 4239 // (null, batchFor(parallelismThreshold), 0, 0, table, 4240 // null, reducer).invoke(); 4241 // } 4242 4243 // /** 4244 // * Returns the result of accumulating the given transformation 4245 // * of all entries using the given reducer to combine values, 4246 // * or null if none. 
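// * <p>Example (a sketch): the longest {@code "key=value"} rendering
// * among all entries, or null if the map is empty:
// * <pre> {@code
// * String longest = map.reduceEntries(100L,
// *     e -> e.getKey() + "=" + e.getValue(),
// *     (a, b) -> a.length() >= b.length() ? a : b);
// * }</pre>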
4247 // * 4248 // * @param parallelismThreshold the (estimated) number of elements 4249 // * needed for this operation to be executed in parallel 4250 // * @param transformer a function returning the transformation 4251 // * for an element, or null if there is no transformation (in 4252 // * which case it is not combined) 4253 // * @param reducer a commutative associative combining function 4254 // * @param <U> the return type of the transformer 4255 // * @return the result of accumulating the given transformation 4256 // * of all entries 4257 // */ 4258 // public <U> U reduceEntries(long parallelismThreshold, 4259 // Function<Map.Entry!(K, V), ? extends U> transformer, 4260 // BiFunction<? super U, ? super U, ? extends U> reducer) { 4261 // if (transformer == null || reducer == null) 4262 // throw new NullPointerException(); 4263 // return new MapReduceEntriesTask<K,V,U> 4264 // (null, batchFor(parallelismThreshold), 0, 0, table, 4265 // null, transformer, reducer).invoke(); 4266 // } 4267 4268 // /** 4269 // * Returns the result of accumulating the given transformation 4270 // * of all entries using the given reducer to combine values, 4271 // * and the given basis as an identity value. 4272 // * 4273 // * @param parallelismThreshold the (estimated) number of elements 4274 // * needed for this operation to be executed in parallel 4275 // * @param transformer a function returning the transformation 4276 // * for an element 4277 // * @param basis the identity (initial default value) for the reduction 4278 // * @param reducer a commutative associative combining function 4279 // * @return the result of accumulating the given transformation 4280 // * of all entries 4281 // */ 4282 // public double reduceEntriesToDouble(long parallelismThreshold, 4283 // ToDoubleFunction<Map.Entry!(K, V)> transformer, 4284 // double basis, 4285 // DoubleBinaryOperator reducer) { 4286 // if (transformer == null || reducer == null) 4287 // throw new NullPointerException(); 4288 // return new MapReduceEntriesToDoubleTask!(K, V) 4289 // (null, batchFor(parallelismThreshold), 0, 0, table, 4290 // null, transformer, basis, reducer).invoke(); 4291 // } 4292 4293 // /** 4294 // * Returns the result of accumulating the given transformation 4295 // * of all entries using the given reducer to combine values, 4296 // * and the given basis as an identity value. 4297 // * 4298 // * @param parallelismThreshold the (estimated) number of elements 4299 // * needed for this operation to be executed in parallel 4300 // * @param transformer a function returning the transformation 4301 // * for an element 4302 // * @param basis the identity (initial default value) for the reduction 4303 // * @param reducer a commutative associative combining function 4304 // * @return the result of accumulating the given transformation 4305 // * of all entries 4306 // */ 4307 // public long reduceEntriesToLong(long parallelismThreshold, 4308 // ToLongFunction<Map.Entry!(K, V)> transformer, 4309 // long basis, 4310 // LongBinaryOperator reducer) { 4311 // if (transformer == null || reducer == null) 4312 // throw new NullPointerException(); 4313 // return new MapReduceEntriesToLongTask!(K, V) 4314 // (null, batchFor(parallelismThreshold), 0, 0, table, 4315 // null, transformer, basis, reducer).invoke(); 4316 // } 4317 4318 // /** 4319 // * Returns the result of accumulating the given transformation 4320 // * of all entries using the given reducer to combine values, 4321 // * and the given basis as an identity value. 
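// * <p>Example (a sketch assuming {@code String} values): the total
// * length of all values, starting from the basis 0:
// * <pre> {@code
// * int total = map.reduceEntriesToInt(100L,
// *     e -> e.getValue().length(), 0, Integer::sum);
// * }</pre>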
4322 // * 4323 // * @param parallelismThreshold the (estimated) number of elements 4324 // * needed for this operation to be executed in parallel 4325 // * @param transformer a function returning the transformation 4326 // * for an element 4327 // * @param basis the identity (initial default value) for the reduction 4328 // * @param reducer a commutative associative combining function 4329 // * @return the result of accumulating the given transformation 4330 // * of all entries 4331 // */ 4332 // public int reduceEntriesToInt(long parallelismThreshold, 4333 // ToIntFunction<Map.Entry!(K, V)> transformer, 4334 // int basis, 4335 // IntBinaryOperator reducer) { 4336 // if (transformer == null || reducer == null) 4337 // throw new NullPointerException(); 4338 // return new MapReduceEntriesToIntTask!(K, V) 4339 // (null, batchFor(parallelismThreshold), 0, 0, table, 4340 // null, transformer, basis, reducer).invoke(); 4341 // } 4342 4343 4344 // /* ----------------Views -------------- */ 4345 4346 // /** 4347 // * Base class for views. 4348 // */ 4349 // abstract static class CollectionView<K,V,E> 4350 // implements Collection<E>, java.io.Serializable { 4351 // private static final long serialVersionUID = 7249069246763182397L; 4352 // final ConcurrentHashMap!(K, V) map; 4353 // CollectionView(ConcurrentHashMap!(K, V) map) { this.map = map; } 4354 4355 // /** 4356 // * Returns the map backing this view. 4357 // * 4358 // * @return the map backing this view 4359 // */ 4360 // public ConcurrentHashMap!(K, V) getMap() { return map; } 4361 4362 // /** 4363 // * Removes all of the elements from this view, by removing all 4364 // * the mappings from the map backing this view. 4365 // */ 4366 // public final void clear() { map.clear(); } 4367 // public final int size() { return map.size(); } 4368 // public final boolean isEmpty() { return map.isEmpty(); } 4369 4370 // // implementations below rely on concrete classes supplying these 4371 // // abstract methods 4372 // /** 4373 // * Returns an iterator over the elements in this collection. 4374 // * 4375 // * <p>The returned iterator is 4376 // * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 4377 // * 4378 // * @return an iterator over the elements in this collection 4379 // */ 4380 // public abstract Iterator<E> iterator(); 4381 // public abstract boolean contains(Object o); 4382 // public abstract boolean remove(Object o); 4383 4384 // private static final String OOME_MSG = "Required array size too large"; 4385 4386 // public final Object[] toArray() { 4387 // long sz = map.mappingCount(); 4388 // if (sz > MAX_ARRAY_SIZE) 4389 // throw new OutOfMemoryError(OOME_MSG); 4390 // int n = (int)sz; 4391 // Object[] r = new Object[n]; 4392 // int i = 0; 4393 // for (E e : this) { 4394 // if (i == n) { 4395 // if (n >= MAX_ARRAY_SIZE) 4396 // throw new OutOfMemoryError(OOME_MSG); 4397 // if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) 4398 // n = MAX_ARRAY_SIZE; 4399 // else 4400 // n += (n >>> 1) + 1; 4401 // r = Arrays.copyOf(r, n); 4402 // } 4403 // r[i++] = e; 4404 // } 4405 // return (i == n) ? r : Arrays.copyOf(r, i); 4406 // } 4407 4408 4409 // public final <T> T[] toArray(T[] a) { 4410 // long sz = map.mappingCount(); 4411 // if (sz > MAX_ARRAY_SIZE) 4412 // throw new OutOfMemoryError(OOME_MSG); 4413 // int m = (int)sz; 4414 // T[] r = (a.length >= m) ? 
a : 4415 // (T[])java.lang.reflect.Array 4416 // .newInstance(a.getClass().getComponentType(), m); 4417 // int n = r.length; 4418 // int i = 0; 4419 // for (E e : this) { 4420 // if (i == n) { 4421 // if (n >= MAX_ARRAY_SIZE) 4422 // throw new OutOfMemoryError(OOME_MSG); 4423 // if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) 4424 // n = MAX_ARRAY_SIZE; 4425 // else 4426 // n += (n >>> 1) + 1; 4427 // r = Arrays.copyOf(r, n); 4428 // } 4429 // r[i++] = (T)e; 4430 // } 4431 // if (a == r && i < n) { 4432 // r[i] = null; // null-terminate 4433 // return r; 4434 // } 4435 // return (i == n) ? r : Arrays.copyOf(r, i); 4436 // } 4437 4438 // /** 4439 // * Returns a string representation of this collection. 4440 // * The string representation consists of the string representations 4441 // * of the collection's elements in the order they are returned by 4442 // * its iterator, enclosed in square brackets ({@code "[]"}). 4443 // * Adjacent elements are separated by the characters {@code ", "} 4444 // * (comma and space). Elements are converted to strings as by 4445 // * {@link String#valueOf(Object)}. 4446 // * 4447 // * @return a string representation of this collection 4448 // */ 4449 // public final String toString() { 4450 // StringBuilder sb = new StringBuilder(); 4451 // sb.append('['); 4452 // Iterator<E> it = iterator(); 4453 // if (it.hasNext()) { 4454 // for (;;) { 4455 // Object e = it.next(); 4456 // sb.append(e == this ? "(this Collection)" : e); 4457 // if (!it.hasNext()) 4458 // break; 4459 // sb.append(',').append(' '); 4460 // } 4461 // } 4462 // return sb.append(']').toString(); 4463 // } 4464 4465 // public final boolean containsAll(Collection<?> c) { 4466 // if (c != this) { 4467 // for (Object e : c) { 4468 // if (e == null || !contains(e)) 4469 // return false; 4470 // } 4471 // } 4472 // return true; 4473 // } 4474 4475 // public boolean removeAll(Collection<?> c) { 4476 // if (c == null) throw new NullPointerException(); 4477 // boolean modified = false; 4478 // // Use (c instanceof Set) as a hint that lookup in c is as 4479 // // efficient as this view 4480 // Node!(K, V)[] t; 4481 // if ((t = map.table) == null) { 4482 // return false; 4483 // } else if (c instanceof Set<?> && c.size() > t.length) { 4484 // for (Iterator<?> it = iterator(); it.hasNext(); ) { 4485 // if (c.contains(it.next())) { 4486 // it.remove(); 4487 // modified = true; 4488 // } 4489 // } 4490 // } else { 4491 // for (Object e : c) 4492 // modified |= remove(e); 4493 // } 4494 // return modified; 4495 // } 4496 4497 // public final boolean retainAll(Collection<?> c) { 4498 // if (c == null) throw new NullPointerException(); 4499 // boolean modified = false; 4500 // for (Iterator<E> it = iterator(); it.hasNext();) { 4501 // if (!c.contains(it.next())) { 4502 // it.remove(); 4503 // modified = true; 4504 // } 4505 // } 4506 // return modified; 4507 // } 4508 4509 // } 4510 4511 // /** 4512 // * A view of a ConcurrentHashMap as a {@link Set} of keys, in 4513 // * which additions may optionally be enabled by mapping to a 4514 // * common value. This class cannot be directly instantiated. 4515 // * See {@link #keySet() keySet()}, 4516 // * {@link #keySet(Object) keySet(V)}, 4517 // * {@link #newKeySet() newKeySet()}, 4518 // * {@link #newKeySet(int) newKeySet(int)}. 
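// * <p>Example (a sketch; {@code id} and {@code handleFirstSighting}
// * are hypothetical): a concurrent set that admits additions by
// * mapping every added key to a fixed placeholder value:
// * <pre> {@code
// * Set<String> seen = ConcurrentHashMap.newKeySet();
// * if (seen.add(id))          // true only when the key was absent
// *     handleFirstSighting(id);
// * }</pre>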
4519 // * 4520 // */ 4521 // public static class KeySetView!(K, V) extends CollectionView<K,V,K> 4522 // implements Set<K>, java.io.Serializable { 4523 // private static final long serialVersionUID = 7249069246763182397L; 4524 // private final V value; 4525 // KeySetView(ConcurrentHashMap!(K, V) map, V value) { // non-public 4526 // super(map); 4527 // this.value = value; 4528 // } 4529 4530 // /** 4531 // * Returns the default mapped value for additions, 4532 // * or {@code null} if additions are not supported. 4533 // * 4534 // * @return the default mapped value for additions, or {@code null} 4535 // * if not supported 4536 // */ 4537 // public V getMappedValue() { return value; } 4538 4539 // /** 4540 // * {@inheritDoc} 4541 // * @throws NullPointerException if the specified key is null 4542 // */ 4543 // public boolean contains(Object o) { return map.containsKey(o); } 4544 4545 // /** 4546 // * Removes the key from this map view, by removing the key (and its 4547 // * corresponding value) from the backing map. This method does 4548 // * nothing if the key is not in the map. 4549 // * 4550 // * @param o the key to be removed from the backing map 4551 // * @return {@code true} if the backing map contained the specified key 4552 // * @throws NullPointerException if the specified key is null 4553 // */ 4554 // public boolean remove(Object o) { return map.remove(o) != null; } 4555 4556 // /** 4557 // * @return an iterator over the keys of the backing map 4558 // */ 4559 // public Iterator<K> iterator() { 4560 // Node!(K, V)[] t; 4561 // ConcurrentHashMap!(K, V) m = map; 4562 // int f = (t = m.table) == null ? 0 : t.length; 4563 // return new KeyIterator!(K, V)(t, f, 0, f, m); 4564 // } 4565 4566 // /** 4567 // * Adds the specified key to this set view by mapping the key to 4568 // * the default mapped value in the backing map, if defined. 4569 // * 4570 // * @param e key to be added 4571 // * @return {@code true} if this set changed as a result of the call 4572 // * @throws NullPointerException if the specified key is null 4573 // * @throws UnsupportedOperationException if no default mapped value 4574 // * for additions was provided 4575 // */ 4576 // public boolean add(K e) { 4577 // V v; 4578 // if ((v = value) == null) 4579 // throw new UnsupportedOperationException(); 4580 // return map.putVal(e, v, true) == null; 4581 // } 4582 4583 // /** 4584 // * Adds all of the elements in the specified collection to this set, 4585 // * as if by calling {@link #add} on each one. 4586 // * 4587 // * @param c the elements to be inserted into this set 4588 // * @return {@code true} if this set changed as a result of the call 4589 // * @throws NullPointerException if the collection or any of its 4590 // * elements are {@code null} 4591 // * @throws UnsupportedOperationException if no default mapped value 4592 // * for additions was provided 4593 // */ 4594 // public boolean addAll(Collection<? 
//             boolean added = false;
//             V v;
//             if ((v = value) == null)
//                 throw new UnsupportedOperationException();
//             for (K e : c) {
//                 if (map.putVal(e, v, true) == null)
//                     added = true;
//             }
//             return added;
//         }

//         public int hashCode() {
//             int h = 0;
//             for (K e : this)
//                 h += e.hashCode();
//             return h;
//         }

//         public boolean equals(Object o) {
//             Set<?> c;
//             return ((o instanceof Set) &&
//                     ((c = (Set<?>)o) == this ||
//                      (containsAll(c) && c.containsAll(this))));
//         }

//         public Spliterator<K> spliterator() {
//             Node!(K, V)[] t;
//             ConcurrentHashMap!(K, V) m = map;
//             long n = m.sumCount();
//             int f = (t = m.table) == null ? 0 : t.length;
//             return new KeySpliterator!(K, V)(t, f, 0, f, n < 0L ? 0L : n);
//         }

//         public void forEach(Consumer<? super K> action) {
//             if (action == null) throw new NullPointerException();
//             Node!(K, V)[] t;
//             if ((t = map.table) != null) {
//                 Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//                 for (Node!(K, V) p; (p = it.advance()) != null; )
//                     action.accept(p.key);
//             }
//         }
//     }
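// A minimal usage sketch for the key-set view described above, assuming the
// KeySetView port eventually keeps the JDK-style API (newKeySet / keySet(V)
// / add, where add is a put-if-absent of the default mapped value).
// Hypothetical D code, commented out like the rest of the unported section:
//
//     auto registry = new ConcurrentHashMap!(string, bool)();
//     // View backed by `registry`; additions map each key to `true`.
//     auto seen = registry.keySet(true);
//     seen.add("alpha");            // put-if-absent of ("alpha", true)
//     assert(registry.containsKey("alpha"));
//     auto keys = registry.keySet();
//     // keys.add("beta");          // no default value: UnsupportedOperationException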
//     /**
//      * A view of a ConcurrentHashMap as a {@link Collection} of
//      * values, in which additions are disabled. This class cannot be
//      * directly instantiated. See {@link #values()}.
//      */
//     static final class ValuesView!(K, V) extends CollectionView<K,V,V>
//         implements Collection<V>, java.io.Serializable {
//         private static final long serialVersionUID = 2249069246763182397L;
//         ValuesView(ConcurrentHashMap!(K, V) map) { super(map); }
//         public final boolean contains(Object o) {
//             return map.containsValue(o);
//         }

//         public final boolean remove(Object o) {
//             if (o != null) {
//                 for (Iterator<V> it = iterator(); it.hasNext();) {
//                     if (o.equals(it.next())) {
//                         it.remove();
//                         return true;
//                     }
//                 }
//             }
//             return false;
//         }

//         public final Iterator<V> iterator() {
//             ConcurrentHashMap!(K, V) m = map;
//             Node!(K, V)[] t;
//             int f = (t = m.table) == null ? 0 : t.length;
//             return new ValueIterator!(K, V)(t, f, 0, f, m);
//         }

//         public final boolean add(V e) {
//             throw new UnsupportedOperationException();
//         }
//         public final boolean addAll(Collection<? extends V> c) {
//             throw new UnsupportedOperationException();
//         }

//         @Override public boolean removeAll(Collection<?> c) {
//             if (c == null) throw new NullPointerException();
//             boolean modified = false;
//             for (Iterator<V> it = iterator(); it.hasNext();) {
//                 if (c.contains(it.next())) {
//                     it.remove();
//                     modified = true;
//                 }
//             }
//             return modified;
//         }

//         public boolean removeIf(Predicate<? super V> filter) {
//             return map.removeValueIf(filter);
//         }

//         public Spliterator<V> spliterator() {
//             Node!(K, V)[] t;
//             ConcurrentHashMap!(K, V) m = map;
//             long n = m.sumCount();
//             int f = (t = m.table) == null ? 0 : t.length;
//             return new ValueSpliterator!(K, V)(t, f, 0, f, n < 0L ? 0L : n);
//         }

//         public void forEach(Consumer<? super V> action) {
//             if (action == null) throw new NullPointerException();
//             Node!(K, V)[] t;
//             if ((t = map.table) != null) {
//                 Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//                 for (Node!(K, V) p; (p = it.advance()) != null; )
//                     action.accept(p.val);
//             }
//         }
//     }
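// A corresponding sketch for the values view, assuming the ported ValuesView
// keeps the JDK behavior shown above (removal writes through to the map,
// addition is always unsupported); hypothetical, exact parameter types
// depend on the port:
//
//     auto cache = new ConcurrentHashMap!(string, int)();
//     cache.put("a", 1);
//     auto vals = cache.values();
//     vals.remove(1);               // removes the first mapping with value 1
//     // vals.add(2);               // always throws UnsupportedOperationException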
//     /**
//      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
//      * entries. This class cannot be directly instantiated. See
//      * {@link #entrySet()}.
//      */
//     static final class EntrySetView!(K, V) extends CollectionView<K,V,Map.Entry!(K, V)>
//         implements Set<Map.Entry!(K, V)>, java.io.Serializable {
//         private static final long serialVersionUID = 2249069246763182397L;
//         EntrySetView(ConcurrentHashMap!(K, V) map) { super(map); }

//         public boolean contains(Object o) {
//             Object k, v, r; Map.Entry<?,?> e;
//             return ((o instanceof Map.Entry) &&
//                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
//                     (r = map.get(k)) != null &&
//                     (v = e.getValue()) != null &&
//                     (v == r || v.equals(r)));
//         }

//         public boolean remove(Object o) {
//             Object k, v; Map.Entry<?,?> e;
//             return ((o instanceof Map.Entry) &&
//                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
//                     (v = e.getValue()) != null &&
//                     map.remove(k, v));
//         }

//         /**
//          * @return an iterator over the entries of the backing map
//          */
//         public Iterator<Map.Entry!(K, V)> iterator() {
//             ConcurrentHashMap!(K, V) m = map;
//             Node!(K, V)[] t;
//             int f = (t = m.table) == null ? 0 : t.length;
//             return new EntryIterator!(K, V)(t, f, 0, f, m);
//         }

//         public boolean add(Entry!(K, V) e) {
//             return map.putVal(e.getKey(), e.getValue(), false) == null;
//         }

//         public boolean addAll(Collection<? extends Entry!(K, V)> c) {
//             boolean added = false;
//             for (Entry!(K, V) e : c) {
//                 if (add(e))
//                     added = true;
//             }
//             return added;
//         }

//         public boolean removeIf(Predicate<? super Entry!(K, V)> filter) {
//             return map.removeEntryIf(filter);
//         }

//         public final int hashCode() {
//             int h = 0;
//             Node!(K, V)[] t;
//             if ((t = map.table) != null) {
//                 Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//                 for (Node!(K, V) p; (p = it.advance()) != null; ) {
//                     h += p.hashCode();
//                 }
//             }
//             return h;
//         }

//         public final boolean equals(Object o) {
//             Set<?> c;
//             return ((o instanceof Set) &&
//                     ((c = (Set<?>)o) == this ||
//                      (containsAll(c) && c.containsAll(this))));
//         }

//         public Spliterator<Map.Entry!(K, V)> spliterator() {
//             Node!(K, V)[] t;
//             ConcurrentHashMap!(K, V) m = map;
//             long n = m.sumCount();
//             int f = (t = m.table) == null ? 0 : t.length;
//             return new EntrySpliterator!(K, V)(t, f, 0, f, n < 0L ? 0L : n, m);
//         }

//         public void forEach(Consumer<? super Map.Entry!(K, V)> action) {
//             if (action == null) throw new NullPointerException();
//             Node!(K, V)[] t;
//             if ((t = map.table) != null) {
//                 Traverser!(K, V) it = new Traverser!(K, V)(t, t.length, 0, t.length);
//                 for (Node!(K, V) p; (p = it.advance()) != null; )
//                     action.accept(new MapEntry!(K, V)(p.key, p.val, map));
//             }
//         }

//     }
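// The entry view likewise writes through: remove(entry) removes the mapping,
// and the entries produced by forEach/iterator are MapEntry instances bound
// to the map, so setValue updates the backing table. A hedged sketch,
// assuming the ported view supports D's foreach iteration:
//
//     foreach (entry; cache.entrySet()) {
//         if (entry.getKey() == "stale")
//             entry.setValue(0);    // writes through to the backing map
//     }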
//     // -------------------------------------------------------

//     /**
//      * Base class for bulk tasks. Repeats some fields and code from
//      * class Traverser, because we need to subclass CountedCompleter.
//      */
//     @SuppressWarnings("serial")
//     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
//         Node!(K, V)[] tab;        // same as Traverser
//         Node!(K, V) next;
//         TableStack!(K, V) stack, spare;
//         int index;
//         int baseIndex;
//         int baseLimit;
//         final int baseSize;
//         int batch;                // split control

//         BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node!(K, V)[] t) {
//             super(par);
//             this.batch = b;
//             this.index = this.baseIndex = i;
//             if ((this.tab = t) == null)
//                 this.baseSize = this.baseLimit = 0;
//             else if (par == null)
//                 this.baseSize = this.baseLimit = t.length;
//             else {
//                 this.baseLimit = f;
//                 this.baseSize = par.baseSize;
//             }
//         }

//         /**
//          * Same as Traverser version.
//          */
//         final Node!(K, V) advance() {
//             Node!(K, V) e;
//             if ((e = next) != null)
//                 e = e.next;
//             for (;;) {
//                 Node!(K, V)[] t; int i, n;
//                 if (e != null)
//                     return next = e;
//                 if (baseIndex >= baseLimit || (t = tab) == null ||
//                     (n = t.length) <= (i = index) || i < 0)
//                     return next = null;
//                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
//                     if (e instanceof ForwardingNode) {
//                         tab = ((ForwardingNode!(K, V))e).nextTable;
//                         e = null;
//                         pushState(t, i, n);
//                         continue;
//                     }
//                     else if (e instanceof TreeBin)
//                         e = ((TreeBin!(K, V))e).first;
//                     else
//                         e = null;
//                 }
//                 if (stack != null)
//                     recoverState(n);
//                 else if ((index = i + baseSize) >= n)
//                     index = ++baseIndex;
//             }
//         }

//         private void pushState(Node!(K, V)[] t, int i, int n) {
//             TableStack!(K, V) s = spare;
//             if (s != null)
//                 spare = s.next;
//             else
//                 s = new TableStack!(K, V)();
//             s.tab = t;
//             s.length = n;
//             s.index = i;
//             s.next = stack;
//             stack = s;
//         }

//         private void recoverState(int n) {
//             TableStack!(K, V) s; int len;
//             while ((s = stack) != null && (index += (len = s.length)) >= n) {
//                 n = len;
//                 index = s.index;
//                 tab = s.tab;
//                 s.tab = null;
//                 TableStack!(K, V) next = s.next;
//                 s.next = spare; // save for reuse
//                 stack = next;
//                 spare = s;
//             }
//             if (s == null && (index += baseSize) >= n)
//                 index = ++baseIndex;
//         }
//     }

//     /*
//      * Task classes. Coded in a regular but ugly format/style to
//      * simplify checks that each variant differs in the right way from
//      * others. The null screenings exist because compilers cannot tell
//      * that we've already null-checked task arguments, so we force
//      * simplest hoisted bypass to help avoid convoluted traps.
//      */
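// All of the task classes below share one splitting skeleton: while `batch`
// allows, compute() halves its [i, baseLimit) slot range, forks the right
// half as a new subtask (bumping the pending count), keeps the left half,
// and finally drains its remaining bins via advance(). A self-contained
// sketch of that shape in plain D, using std.parallelism in place of
// CountedCompleter (bulkApply and its parameters are hypothetical):
//
//     import std.parallelism : task, taskPool;
//
//     void bulkApply(int lo, int hi, int batch, void delegate(int) action) {
//         if (batch > 0 && hi - lo > 1) {
//             int mid = (lo + hi) >>> 1;
//             auto right = task(&bulkApply, mid, hi, batch >>> 1, action);
//             taskPool.put(right);                      // fork the right half
//             bulkApply(lo, mid, batch >>> 1, action);  // keep the left half
//             right.yieldForce();                       // join before completing
//         } else {
//             foreach (i; lo .. hi)                     // leaf: process slots
//                 action(i);
//         }
//     }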
//     @SuppressWarnings("serial")
//     static final class ForEachKeyTask!(K, V)
//         extends BulkTask<K,V,Void> {
//         final Consumer<? super K> action;
//         ForEachKeyTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Consumer<? super K> action) {
//             super(p, b, i, f, t);
//             this.action = action;
//         }
//         public final void compute() {
//             final Consumer<? super K> action;
//             if ((action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachKeyTask!(K, V)
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null;)
//                     action.accept(p.key);
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachValueTask!(K, V)
//         extends BulkTask<K,V,Void> {
//         final Consumer<? super V> action;
//         ForEachValueTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Consumer<? super V> action) {
//             super(p, b, i, f, t);
//             this.action = action;
//         }
//         public final void compute() {
//             final Consumer<? super V> action;
//             if ((action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachValueTask!(K, V)
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null;)
//                     action.accept(p.val);
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachEntryTask!(K, V)
//         extends BulkTask<K,V,Void> {
//         final Consumer<? super Entry!(K, V)> action;
//         ForEachEntryTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Consumer<? super Entry!(K, V)> action) {
//             super(p, b, i, f, t);
//             this.action = action;
//         }
//         public final void compute() {
//             final Consumer<? super Entry!(K, V)> action;
//             if ((action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachEntryTask!(K, V)
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     action.accept(p);
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachMappingTask!(K, V)
//         extends BulkTask<K,V,Void> {
//         final BiConsumer<? super K, ? super V> action;
//         ForEachMappingTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              BiConsumer<? super K, ? super V> action) {
//             super(p, b, i, f, t);
//             this.action = action;
//         }
//         public final void compute() {
//             final BiConsumer<? super K, ? super V> action;
//             if ((action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachMappingTask!(K, V)
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     action.accept(p.key, p.val);
//                 propagateCompletion();
//             }
//         }
//     }
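// These four tasks back the public bulk forEach methods. A usage sketch,
// assuming the port keeps the JDK-style signatures with the parallelism
// threshold as the first argument (hypothetical until ported):
//
//     import std.stdio : writeln, writefln;
//
//     auto counts = new ConcurrentHashMap!(string, long)();
//     // Parallelize only once the map holds at least 1000 mappings.
//     counts.forEachKey(1000, (string k) { writeln(k); });
//     counts.forEach(1000, (string k, long v) { writefln("%s=%s", k, v); });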
//     @SuppressWarnings("serial")
//     static final class ForEachTransformedKeyTask<K,V,U>
//         extends BulkTask<K,V,Void> {
//         final Function<? super K, ? extends U> transformer;
//         final Consumer<? super U> action;
//         ForEachTransformedKeyTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
//             super(p, b, i, f, t);
//             this.transformer = transformer; this.action = action;
//         }
//         public final void compute() {
//             final Function<? super K, ? extends U> transformer;
//             final Consumer<? super U> action;
//             if ((transformer = this.transformer) != null &&
//                 (action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachTransformedKeyTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          transformer, action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.key)) != null)
//                         action.accept(u);
//                 }
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachTransformedValueTask<K,V,U>
//         extends BulkTask<K,V,Void> {
//         final Function<? super V, ? extends U> transformer;
//         final Consumer<? super U> action;
//         ForEachTransformedValueTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
//             super(p, b, i, f, t);
//             this.transformer = transformer; this.action = action;
//         }
//         public final void compute() {
//             final Function<? super V, ? extends U> transformer;
//             final Consumer<? super U> action;
//             if ((transformer = this.transformer) != null &&
//                 (action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachTransformedValueTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          transformer, action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.val)) != null)
//                         action.accept(u);
//                 }
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachTransformedEntryTask<K,V,U>
//         extends BulkTask<K,V,Void> {
//         final Function<Map.Entry!(K, V), ? extends U> transformer;
//         final Consumer<? super U> action;
//         ForEachTransformedEntryTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<Map.Entry!(K, V), ? extends U> transformer, Consumer<? super U> action) {
//             super(p, b, i, f, t);
//             this.transformer = transformer; this.action = action;
//         }
//         public final void compute() {
//             final Function<Map.Entry!(K, V), ? extends U> transformer;
//             final Consumer<? super U> action;
//             if ((transformer = this.transformer) != null &&
//                 (action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachTransformedEntryTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          transformer, action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p)) != null)
//                         action.accept(u);
//                 }
//                 propagateCompletion();
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ForEachTransformedMappingTask<K,V,U>
//         extends BulkTask<K,V,Void> {
//         final BiFunction<? super K, ? super V, ? extends U> transformer;
//         final Consumer<? super U> action;
//         ForEachTransformedMappingTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              BiFunction<? super K, ? super V, ? extends U> transformer,
//              Consumer<? super U> action) {
//             super(p, b, i, f, t);
//             this.transformer = transformer; this.action = action;
//         }
//         public final void compute() {
//             final BiFunction<? super K, ? super V, ? extends U> transformer;
//             final Consumer<? super U> action;
//             if ((transformer = this.transformer) != null &&
//                 (action = this.action) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     new ForEachTransformedMappingTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          transformer, action).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.key, p.val)) != null)
//                         action.accept(u);
//                 }
//                 propagateCompletion();
//             }
//         }
//     }
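// In the transformed variants, the transformer doubles as a filter: a null
// result is skipped rather than handed to the action. Sketch, same porting
// assumptions as above:
//
//     import std.format : format;
//
//     // Report only large values; returning null drops the element.
//     counts.forEachValue(1000,
//         (long v) => v > 100 ? format("big: %s", v) : null,
//         (string msg) { writeln(msg); });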
//     @SuppressWarnings("serial")
//     static final class SearchKeysTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<? super K, ? extends U> searchFunction;
//         final AtomicReference<U> result;
//         SearchKeysTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<? super K, ? extends U> searchFunction,
//              AtomicReference<U> result) {
//             super(p, b, i, f, t);
//             this.searchFunction = searchFunction; this.result = result;
//         }
//         public final U getRawResult() { return result.get(); }
//         public final void compute() {
//             final Function<? super K, ? extends U> searchFunction;
//             final AtomicReference<U> result;
//             if ((searchFunction = this.searchFunction) != null &&
//                 (result = this.result) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     if (result.get() != null)
//                         return;
//                     addToPendingCount(1);
//                     new SearchKeysTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          searchFunction, result).fork();
//                 }
//                 while (result.get() == null) {
//                     U u;
//                     Node!(K, V) p;
//                     if ((p = advance()) == null) {
//                         propagateCompletion();
//                         break;
//                     }
//                     if ((u = searchFunction.apply(p.key)) != null) {
//                         if (result.compareAndSet(null, u))
//                             quietlyCompleteRoot();
//                         break;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class SearchValuesTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<? super V, ? extends U> searchFunction;
//         final AtomicReference<U> result;
//         SearchValuesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<? super V, ? extends U> searchFunction,
//              AtomicReference<U> result) {
//             super(p, b, i, f, t);
//             this.searchFunction = searchFunction; this.result = result;
//         }
//         public final U getRawResult() { return result.get(); }
//         public final void compute() {
//             final Function<? super V, ? extends U> searchFunction;
//             final AtomicReference<U> result;
//             if ((searchFunction = this.searchFunction) != null &&
//                 (result = this.result) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     if (result.get() != null)
//                         return;
//                     addToPendingCount(1);
//                     new SearchValuesTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          searchFunction, result).fork();
//                 }
//                 while (result.get() == null) {
//                     U u;
//                     Node!(K, V) p;
//                     if ((p = advance()) == null) {
//                         propagateCompletion();
//                         break;
//                     }
//                     if ((u = searchFunction.apply(p.val)) != null) {
//                         if (result.compareAndSet(null, u))
//                             quietlyCompleteRoot();
//                         break;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class SearchEntriesTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<Entry!(K, V), ? extends U> searchFunction;
//         final AtomicReference<U> result;
//         SearchEntriesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              Function<Entry!(K, V), ? extends U> searchFunction,
//              AtomicReference<U> result) {
//             super(p, b, i, f, t);
//             this.searchFunction = searchFunction; this.result = result;
//         }
//         public final U getRawResult() { return result.get(); }
//         public final void compute() {
//             final Function<Entry!(K, V), ? extends U> searchFunction;
//             final AtomicReference<U> result;
//             if ((searchFunction = this.searchFunction) != null &&
//                 (result = this.result) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     if (result.get() != null)
//                         return;
//                     addToPendingCount(1);
//                     new SearchEntriesTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          searchFunction, result).fork();
//                 }
//                 while (result.get() == null) {
//                     U u;
//                     Node!(K, V) p;
//                     if ((p = advance()) == null) {
//                         propagateCompletion();
//                         break;
//                     }
//                     if ((u = searchFunction.apply(p)) != null) {
//                         if (result.compareAndSet(null, u))
//                             quietlyCompleteRoot();
//                         return;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class SearchMappingsTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final BiFunction<? super K, ? super V, ? extends U> searchFunction;
//         final AtomicReference<U> result;
//         SearchMappingsTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              BiFunction<? super K, ? super V, ? extends U> searchFunction,
//              AtomicReference<U> result) {
//             super(p, b, i, f, t);
//             this.searchFunction = searchFunction; this.result = result;
//         }
//         public final U getRawResult() { return result.get(); }
//         public final void compute() {
//             final BiFunction<? super K, ? super V, ? extends U> searchFunction;
//             final AtomicReference<U> result;
//             if ((searchFunction = this.searchFunction) != null &&
//                 (result = this.result) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     if (result.get() != null)
//                         return;
//                     addToPendingCount(1);
//                     new SearchMappingsTask<K,V,U>
//                         (this, batch >>>= 1, baseLimit = h, f, tab,
//                          searchFunction, result).fork();
//                 }
//                 while (result.get() == null) {
//                     U u;
//                     Node!(K, V) p;
//                     if ((p = advance()) == null) {
//                         propagateCompletion();
//                         break;
//                     }
//                     if ((u = searchFunction.apply(p.key, p.val)) != null) {
//                         if (result.compareAndSet(null, u))
//                             quietlyCompleteRoot();
//                         break;
//                     }
//                 }
//             }
//         }
//     }
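// The search tasks share a single AtomicReference result, so the first
// non-null hit wins, and both the splitting loop and the scan loop re-check
// it to bail out early. Usage sketch, same porting assumptions:
//
//     long limit = 100;
//     // Any key whose value exceeds the limit, or null if none does;
//     // the search function returning null means "keep looking".
//     string hit = counts.search(1000,
//         (string k, long v) => v > limit ? k : null);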
//     @SuppressWarnings("serial")
//     static final class ReduceKeysTask!(K, V)
//         extends BulkTask<K,V,K> {
//         final BiFunction<? super K, ? super K, ? extends K> reducer;
//         K result;
//         ReduceKeysTask!(K, V) rights, nextRight;
//         ReduceKeysTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              ReduceKeysTask!(K, V) nextRight,
//              BiFunction<? super K, ? super K, ? extends K> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.reducer = reducer;
//         }
//         public final K getRawResult() { return result; }
//         public final void compute() {
//             final BiFunction<? super K, ? super K, ? extends K> reducer;
//             if ((reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new ReduceKeysTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, reducer)).fork();
//                 }
//                 K r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     K u = p.key;
//                     r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     ReduceKeysTask!(K, V)
//                         t = (ReduceKeysTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         K tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class ReduceValuesTask!(K, V)
//         extends BulkTask<K,V,V> {
//         final BiFunction<? super V, ? super V, ? extends V> reducer;
//         V result;
//         ReduceValuesTask!(K, V) rights, nextRight;
//         ReduceValuesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              ReduceValuesTask!(K, V) nextRight,
//              BiFunction<? super V, ? super V, ? extends V> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.reducer = reducer;
//         }
//         public final V getRawResult() { return result; }
//         public final void compute() {
//             final BiFunction<? super V, ? super V, ? extends V> reducer;
//             if ((reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new ReduceValuesTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, reducer)).fork();
//                 }
//                 V r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     V v = p.val;
//                     r = (r == null) ? v : reducer.apply(r, v);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     ReduceValuesTask!(K, V)
//                         t = (ReduceValuesTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         V tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class ReduceEntriesTask!(K, V)
//         extends BulkTask<K,V,Map.Entry!(K, V)> {
//         final BiFunction<Map.Entry!(K, V), Map.Entry!(K, V), ? extends Map.Entry!(K, V)> reducer;
//         Map.Entry!(K, V) result;
//         ReduceEntriesTask!(K, V) rights, nextRight;
//         ReduceEntriesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              ReduceEntriesTask!(K, V) nextRight,
//              BiFunction<Entry!(K, V), Map.Entry!(K, V), ? extends Map.Entry!(K, V)> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.reducer = reducer;
//         }
//         public final Map.Entry!(K, V) getRawResult() { return result; }
//         public final void compute() {
//             final BiFunction<Map.Entry!(K, V), Map.Entry!(K, V), ? extends Map.Entry!(K, V)> reducer;
//             if ((reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new ReduceEntriesTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, reducer)).fork();
//                 }
//                 Map.Entry!(K, V) r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = (r == null) ? p : reducer.apply(r, p);
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     ReduceEntriesTask!(K, V)
//                         t = (ReduceEntriesTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         Map.Entry!(K, V) tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
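// The reduce tasks combine leaf results up the completer tree through the
// rights/nextRight chain, so the merge needs no extra synchronization; a
// null element (or an empty map) simply contributes nothing. Usage sketch,
// same porting assumptions:
//
//     // Longest key, or null for an empty map.
//     string longest = counts.reduceKeys(1000,
//         (string a, string b) => a.length >= b.length ? a : b);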
//     @SuppressWarnings("serial")
//     static final class MapReduceKeysTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<? super K, ? extends U> transformer;
//         final BiFunction<? super U, ? super U, ? extends U> reducer;
//         U result;
//         MapReduceKeysTask<K,V,U> rights, nextRight;
//         MapReduceKeysTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceKeysTask<K,V,U> nextRight,
//              Function<? super K, ? extends U> transformer,
//              BiFunction<? super U, ? super U, ? extends U> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.reducer = reducer;
//         }
//         public final U getRawResult() { return result; }
//         public final void compute() {
//             final Function<? super K, ? extends U> transformer;
//             final BiFunction<? super U, ? super U, ? extends U> reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceKeysTask<K,V,U>
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, reducer)).fork();
//                 }
//                 U r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.key)) != null)
//                         r = (r == null) ? u : reducer.apply(r, u);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceKeysTask<K,V,U>
//                         t = (MapReduceKeysTask<K,V,U>)c,
//                         s = t.rights;
//                     while (s != null) {
//                         U tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceValuesTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<? super V, ? extends U> transformer;
//         final BiFunction<? super U, ? super U, ? extends U> reducer;
//         U result;
//         MapReduceValuesTask<K,V,U> rights, nextRight;
//         MapReduceValuesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceValuesTask<K,V,U> nextRight,
//              Function<? super V, ? extends U> transformer,
//              BiFunction<? super U, ? super U, ? extends U> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.reducer = reducer;
//         }
//         public final U getRawResult() { return result; }
//         public final void compute() {
//             final Function<? super V, ? extends U> transformer;
//             final BiFunction<? super U, ? super U, ? extends U> reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceValuesTask<K,V,U>
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, reducer)).fork();
//                 }
//                 U r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.val)) != null)
//                         r = (r == null) ? u : reducer.apply(r, u);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceValuesTask<K,V,U>
//                         t = (MapReduceValuesTask<K,V,U>)c,
//                         s = t.rights;
//                     while (s != null) {
//                         U tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceEntriesTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final Function<Map.Entry!(K, V), ? extends U> transformer;
//         final BiFunction<? super U, ? super U, ? extends U> reducer;
//         U result;
//         MapReduceEntriesTask<K,V,U> rights, nextRight;
//         MapReduceEntriesTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceEntriesTask<K,V,U> nextRight,
//              Function<Map.Entry!(K, V), ? extends U> transformer,
//              BiFunction<? super U, ? super U, ? extends U> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.reducer = reducer;
//         }
//         public final U getRawResult() { return result; }
//         public final void compute() {
//             final Function<Map.Entry!(K, V), ? extends U> transformer;
//             final BiFunction<? super U, ? super U, ? extends U> reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceEntriesTask<K,V,U>
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, reducer)).fork();
//                 }
//                 U r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p)) != null)
//                         r = (r == null) ? u : reducer.apply(r, u);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceEntriesTask<K,V,U>
//                         t = (MapReduceEntriesTask<K,V,U>)c,
//                         s = t.rights;
//                     while (s != null) {
//                         U tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceMappingsTask<K,V,U>
//         extends BulkTask<K,V,U> {
//         final BiFunction<? super K, ? super V, ? extends U> transformer;
//         final BiFunction<? super U, ? super U, ? extends U> reducer;
//         U result;
//         MapReduceMappingsTask<K,V,U> rights, nextRight;
//         MapReduceMappingsTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceMappingsTask<K,V,U> nextRight,
//              BiFunction<? super K, ? super V, ? extends U> transformer,
//              BiFunction<? super U, ? super U, ? extends U> reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.reducer = reducer;
//         }
//         public final U getRawResult() { return result; }
//         public final void compute() {
//             final BiFunction<? super K, ? super V, ? extends U> transformer;
//             final BiFunction<? super U, ? super U, ? extends U> reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceMappingsTask<K,V,U>
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, reducer)).fork();
//                 }
//                 U r = null;
//                 for (Node!(K, V) p; (p = advance()) != null; ) {
//                     U u;
//                     if ((u = transformer.apply(p.key, p.val)) != null)
//                         r = (r == null) ? u : reducer.apply(r, u);
//                 }
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceMappingsTask<K,V,U>
//                         t = (MapReduceMappingsTask<K,V,U>)c,
//                         s = t.rights;
//                     while (s != null) {
//                         U tr, sr;
//                         if ((sr = s.result) != null)
//                             t.result = (((tr = t.result) == null) ? sr :
//                                         reducer.apply(tr, sr));
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
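// The mapped variants fuse a per-element transform into the reduction; as
// with the transformed forEach, a null transformer result drops the element.
// Sketch, same porting assumptions:
//
//     // Comma-join the keys of entries holding odd values.
//     string odds = counts.reduce(1000,
//         (string k, long v) => (v & 1) ? k : null,
//         (string a, string b) => a ~ "," ~ b);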
//     @SuppressWarnings("serial")
//     static final class MapReduceKeysToDoubleTask!(K, V)
//         extends BulkTask<K,V,Double> {
//         final ToDoubleFunction<? super K> transformer;
//         final DoubleBinaryOperator reducer;
//         final double basis;
//         double result;
//         MapReduceKeysToDoubleTask!(K, V) rights, nextRight;
//         MapReduceKeysToDoubleTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceKeysToDoubleTask!(K, V) nextRight,
//              ToDoubleFunction<? super K> transformer,
//              double basis,
//              DoubleBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Double getRawResult() { return result; }
//         public final void compute() {
//             final ToDoubleFunction<? super K> transformer;
//             final DoubleBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 double r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceKeysToDoubleTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceKeysToDoubleTask!(K, V)
//                         t = (MapReduceKeysToDoubleTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsDouble(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceValuesToDoubleTask!(K, V)
//         extends BulkTask<K,V,Double> {
//         final ToDoubleFunction<? super V> transformer;
//         final DoubleBinaryOperator reducer;
//         final double basis;
//         double result;
//         MapReduceValuesToDoubleTask!(K, V) rights, nextRight;
//         MapReduceValuesToDoubleTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceValuesToDoubleTask!(K, V) nextRight,
//              ToDoubleFunction<? super V> transformer,
//              double basis,
//              DoubleBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Double getRawResult() { return result; }
//         public final void compute() {
//             final ToDoubleFunction<? super V> transformer;
//             final DoubleBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 double r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceValuesToDoubleTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceValuesToDoubleTask!(K, V)
//                         t = (MapReduceValuesToDoubleTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsDouble(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceEntriesToDoubleTask!(K, V)
//         extends BulkTask<K,V,Double> {
//         final ToDoubleFunction<Map.Entry!(K, V)> transformer;
//         final DoubleBinaryOperator reducer;
//         final double basis;
//         double result;
//         MapReduceEntriesToDoubleTask!(K, V) rights, nextRight;
//         MapReduceEntriesToDoubleTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceEntriesToDoubleTask!(K, V) nextRight,
//              ToDoubleFunction<Map.Entry!(K, V)> transformer,
//              double basis,
//              DoubleBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Double getRawResult() { return result; }
//         public final void compute() {
//             final ToDoubleFunction<Map.Entry!(K, V)> transformer;
//             final DoubleBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 double r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceEntriesToDoubleTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceEntriesToDoubleTask!(K, V)
//                         t = (MapReduceEntriesToDoubleTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsDouble(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceMappingsToDoubleTask!(K, V)
//         extends BulkTask<K,V,Double> {
//         final ToDoubleBiFunction<? super K, ? super V> transformer;
//         final DoubleBinaryOperator reducer;
//         final double basis;
//         double result;
//         MapReduceMappingsToDoubleTask!(K, V) rights, nextRight;
//         MapReduceMappingsToDoubleTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceMappingsToDoubleTask!(K, V) nextRight,
//              ToDoubleBiFunction<? super K, ? super V> transformer,
//              double basis,
//              DoubleBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Double getRawResult() { return result; }
//         public final void compute() {
//             final ToDoubleBiFunction<? super K, ? super V> transformer;
//             final DoubleBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 double r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceMappingsToDoubleTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceMappingsToDoubleTask!(K, V)
//                         t = (MapReduceMappingsToDoubleTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsDouble(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
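// The To{Double,Long,Int} families avoid boxing by threading a primitive
// accumulator seeded with `basis`; since every subtask starts from it,
// `basis` must be an identity for the reducer (0.0 for addition below).
// Sketch, same porting assumptions:
//
//     double total = counts.reduceValuesToDouble(1000,
//         (long v) => cast(double) v,      // transformer
//         0.0,                             // basis: identity element for +
//         (double a, double b) => a + b);  // reducer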
//     @SuppressWarnings("serial")
//     static final class MapReduceKeysToLongTask!(K, V)
//         extends BulkTask<K,V,Long> {
//         final ToLongFunction<? super K> transformer;
//         final LongBinaryOperator reducer;
//         final long basis;
//         long result;
//         MapReduceKeysToLongTask!(K, V) rights, nextRight;
//         MapReduceKeysToLongTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceKeysToLongTask!(K, V) nextRight,
//              ToLongFunction<? super K> transformer,
//              long basis,
//              LongBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Long getRawResult() { return result; }
//         public final void compute() {
//             final ToLongFunction<? super K> transformer;
//             final LongBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 long r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceKeysToLongTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceKeysToLongTask!(K, V)
//                         t = (MapReduceKeysToLongTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsLong(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceValuesToLongTask!(K, V)
//         extends BulkTask<K,V,Long> {
//         final ToLongFunction<? super V> transformer;
//         final LongBinaryOperator reducer;
//         final long basis;
//         long result;
//         MapReduceValuesToLongTask!(K, V) rights, nextRight;
//         MapReduceValuesToLongTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceValuesToLongTask!(K, V) nextRight,
//              ToLongFunction<? super V> transformer,
//              long basis,
//              LongBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Long getRawResult() { return result; }
//         public final void compute() {
//             final ToLongFunction<? super V> transformer;
//             final LongBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 long r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceValuesToLongTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceValuesToLongTask!(K, V)
//                         t = (MapReduceValuesToLongTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsLong(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceEntriesToLongTask!(K, V)
//         extends BulkTask<K,V,Long> {
//         final ToLongFunction<Map.Entry!(K, V)> transformer;
//         final LongBinaryOperator reducer;
//         final long basis;
//         long result;
//         MapReduceEntriesToLongTask!(K, V) rights, nextRight;
//         MapReduceEntriesToLongTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceEntriesToLongTask!(K, V) nextRight,
//              ToLongFunction<Map.Entry!(K, V)> transformer,
//              long basis,
//              LongBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Long getRawResult() { return result; }
//         public final void compute() {
//             final ToLongFunction<Map.Entry!(K, V)> transformer;
//             final LongBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 long r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceEntriesToLongTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsLong(r, transformer.applyAsLong(p));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceEntriesToLongTask!(K, V)
//                         t = (MapReduceEntriesToLongTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsLong(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceMappingsToLongTask!(K, V)
//         extends BulkTask<K,V,Long> {
//         final ToLongBiFunction<? super K, ? super V> transformer;
//         final LongBinaryOperator reducer;
//         final long basis;
//         long result;
//         MapReduceMappingsToLongTask!(K, V) rights, nextRight;
//         MapReduceMappingsToLongTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceMappingsToLongTask!(K, V) nextRight,
//              ToLongBiFunction<? super K, ? super V> transformer,
//              long basis,
//              LongBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Long getRawResult() { return result; }
//         public final void compute() {
//             final ToLongBiFunction<? super K, ? super V> transformer;
//             final LongBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 long r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceMappingsToLongTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceMappingsToLongTask!(K, V)
//                         t = (MapReduceMappingsToLongTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsLong(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceKeysToIntTask!(K, V)
//         extends BulkTask<K,V,Integer> {
//         final ToIntFunction<? super K> transformer;
//         final IntBinaryOperator reducer;
//         final int basis;
//         int result;
//         MapReduceKeysToIntTask!(K, V) rights, nextRight;
//         MapReduceKeysToIntTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceKeysToIntTask!(K, V) nextRight,
//              ToIntFunction<? super K> transformer,
//              int basis,
//              IntBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Integer getRawResult() { return result; }
//         public final void compute() {
//             final ToIntFunction<? super K> transformer;
//             final IntBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 int r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceKeysToIntTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceKeysToIntTask!(K, V)
//                         t = (MapReduceKeysToIntTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsInt(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceValuesToIntTask!(K, V)
//         extends BulkTask<K,V,Integer> {
//         final ToIntFunction<? super V> transformer;
//         final IntBinaryOperator reducer;
//         final int basis;
//         int result;
//         MapReduceValuesToIntTask!(K, V) rights, nextRight;
//         MapReduceValuesToIntTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceValuesToIntTask!(K, V) nextRight,
//              ToIntFunction<? super V> transformer,
//              int basis,
//              IntBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Integer getRawResult() { return result; }
//         public final void compute() {
//             final ToIntFunction<? super V> transformer;
//             final IntBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 int r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceValuesToIntTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceValuesToIntTask!(K, V)
//                         t = (MapReduceValuesToIntTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsInt(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }
//     @SuppressWarnings("serial")
//     static final class MapReduceEntriesToIntTask!(K, V)
//         extends BulkTask<K,V,Integer> {
//         final ToIntFunction<Map.Entry!(K, V)> transformer;
//         final IntBinaryOperator reducer;
//         final int basis;
//         int result;
//         MapReduceEntriesToIntTask!(K, V) rights, nextRight;
//         MapReduceEntriesToIntTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceEntriesToIntTask!(K, V) nextRight,
//              ToIntFunction<Map.Entry!(K, V)> transformer,
//              int basis,
//              IntBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Integer getRawResult() { return result; }
//         public final void compute() {
//             final ToIntFunction<Map.Entry!(K, V)> transformer;
//             final IntBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 int r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceEntriesToIntTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsInt(r, transformer.applyAsInt(p));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceEntriesToIntTask!(K, V)
//                         t = (MapReduceEntriesToIntTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsInt(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     @SuppressWarnings("serial")
//     static final class MapReduceMappingsToIntTask!(K, V)
//         extends BulkTask<K,V,Integer> {
//         final ToIntBiFunction<? super K, ? super V> transformer;
//         final IntBinaryOperator reducer;
//         final int basis;
//         int result;
//         MapReduceMappingsToIntTask!(K, V) rights, nextRight;
//         MapReduceMappingsToIntTask
//             (BulkTask<K,V,?> p, int b, int i, int f, Node!(K, V)[] t,
//              MapReduceMappingsToIntTask!(K, V) nextRight,
//              ToIntBiFunction<? super K, ? super V> transformer,
//              int basis,
//              IntBinaryOperator reducer) {
//             super(p, b, i, f, t); this.nextRight = nextRight;
//             this.transformer = transformer;
//             this.basis = basis; this.reducer = reducer;
//         }
//         public final Integer getRawResult() { return result; }
//         public final void compute() {
//             final ToIntBiFunction<? super K, ? super V> transformer;
//             final IntBinaryOperator reducer;
//             if ((transformer = this.transformer) != null &&
//                 (reducer = this.reducer) != null) {
//                 int r = this.basis;
//                 for (int i = baseIndex, f, h; batch > 0 &&
//                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
//                     addToPendingCount(1);
//                     (rights = new MapReduceMappingsToIntTask!(K, V)
//                      (this, batch >>>= 1, baseLimit = h, f, tab,
//                       rights, transformer, r, reducer)).fork();
//                 }
//                 for (Node!(K, V) p; (p = advance()) != null; )
//                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
//                 result = r;
//                 CountedCompleter<?> c;
//                 for (c = firstComplete(); c != null; c = c.nextComplete()) {

//                     MapReduceMappingsToIntTask!(K, V)
//                         t = (MapReduceMappingsToIntTask!(K, V))c,
//                         s = t.rights;
//                     while (s != null) {
//                         t.result = reducer.applyAsInt(t.result, s.result);
//                         s = t.rights = s.nextRight;
//                     }
//                 }
//             }
//         }
//     }

//     // Unsafe mechanics
//     // private static final Unsafe U = Unsafe.getUnsafe();
//     // private static final long SIZECTL = U.objectFieldOffset(
//     //         ConcurrentHashMap.class, "sizeCtl");
//     // private static final long TRANSFERINDEX = U.objectFieldOffset(
//     //         ConcurrentHashMap.class, "transferIndex");
//     // private static final long BASECOUNT = U.objectFieldOffset(
//     //         ConcurrentHashMap.class, "baseCount");
//     // private static final long CELLSBUSY = U.objectFieldOffset(
//     //         ConcurrentHashMap.class, "cellsBusy");
//     // private static final long CELLVALUE = U.objectFieldOffset(
//     //         CounterCell.class, "value");
//     // private static final int ABASE = U.arrayBaseOffset(Node[].class);
//     // private static final int ASHIFT;

//     // static {
//     //     int scale = U.arrayIndexScale(Node[].class);
//     //     if ((scale & (scale - 1)) != 0)
//     //         throw new ExceptionInInitializerError("array index scale not a power of two");
//     //     ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);

//     //     // Reduce the risk of rare disastrous classloading in first call to
//     //     // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
//     //     Class<?> ensureLoaded = LockSupport.class;

//     //     // Eager class load observed to help JIT during startup
//     //     ensureLoaded = ReservationNode.class;
//     // }
// }
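// The JDK's Unsafe field-offset mechanics retained above have no direct D
// analogue; a completed port would more likely use core.atomic for the CAS
// updates of sizeCtl/transferIndex/baseCount. A minimal sketch of the same
// idea (the field and function names are illustrative, not this module's
// implementation):
//
//     import core.atomic : atomicLoad, cas;
//
//     shared long baseCount;
//
//     // One CAS attempt to add a contribution (retry loop omitted).
//     bool tryAddCount(long x) {
//         long b = atomicLoad(baseCount);
//         return cas(&baseCount, b, b + x);
//     }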