diff --git a/patches/server/0007-ConcurrentUtil.patch b/patches/server/0007-ConcurrentUtil.patch
index b5d70f9b38..83edf8ab9d 100644
--- a/patches/server/0007-ConcurrentUtil.patch
+++ b/patches/server/0007-ConcurrentUtil.patch
@@ -6,14 +6,15 @@ Subject: [PATCH] ConcurrentUtil
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
new file mode 100644
-index 0000000000000000000000000000000000000000..f4415f782b32fed25da98e44b172f717c4d46e34
+index 0000000000000000000000000000000000000000..f84a622dc29750139ac280f480b7cd132b036287
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
-@@ -0,0 +1,1402 @@
+@@ -0,0 +1,1421 @@
+package ca.spottedleaf.concurrentutil.collection;
+
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import ca.spottedleaf.concurrentutil.util.Validate;
++
+import java.lang.invoke.VarHandle;
+import java.util.ArrayList;
+import java.util.Collection;
@@ -405,6 +406,24 @@ index 0000000000000000000000000000000000000000..f4415f782b32fed25da98e44b172f717
+ }
+
+ /**
++ * Returns whether this queue is currently add-blocked. That is, whether {@link #add(Object)} and friends will return {@code false}.
++ */
++ public boolean isAddBlocked() {
++ for (LinkedNode tail = this.getTailOpaque();;) {
++ LinkedNode next = tail.getNextVolatile();
++ if (next == null) {
++ return false;
++ }
++
++ if (next == tail) {
++ return true;
++ }
++
++ tail = next;
++ }
++ }
++
++ /**
+ * Atomically removes the head from this queue if it exists, otherwise prevents additions to this queue if no
+ * head is removed.
+ *
@@ -1414,14 +1433,15 @@ index 0000000000000000000000000000000000000000..f4415f782b32fed25da98e44b172f717
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java
new file mode 100644
-index 0000000000000000000000000000000000000000..597659f38aa816646dcda4ca39c002b6d9f9a792
+index 0000000000000000000000000000000000000000..094eff418b4e3bffce020d650931b4d9e58fa9ed
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java
-@@ -0,0 +1,148 @@
+@@ -0,0 +1,149 @@
+package ca.spottedleaf.concurrentutil.collection;
+
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import ca.spottedleaf.concurrentutil.util.Validate;
++
+import java.lang.invoke.VarHandle;
+import java.util.ConcurrentModificationException;
+
@@ -1568,22 +1588,22 @@ index 0000000000000000000000000000000000000000..597659f38aa816646dcda4ca39c002b6
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java b/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java
new file mode 100644
-index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1cd3382b2a
+index 0000000000000000000000000000000000000000..46d1bd01542ebeeffc0006a5c585a50dbbbff907
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java
-@@ -0,0 +1,98 @@
+@@ -0,0 +1,112 @@
+package ca.spottedleaf.concurrentutil.completable;
+
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+import ca.spottedleaf.concurrentutil.executor.Cancellable;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
-+import com.mojang.logging.LogUtils;
+import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
+import java.util.function.BiConsumer;
+
+public final class Completable {
+
-+ private static final Logger LOGGER = LogUtils.getLogger();
++ private static final Logger LOGGER = LoggerFactory.getLogger(Completable.class);
+
+ private final MultiThreadedQueue> waiters = new MultiThreadedQueue<>();
+ private T result;
@@ -1610,6 +1630,13 @@ index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1c
+ return this.throwable;
+ }
+
++ /**
++ * Adds a waiter that should only be completed asynchronously by the complete() calls. If complete()
++ * has already been called, returns {@code null} and does not invoke the specified consumer.
++ * @param consumer Consumer to be executed on completion
++ * @throws NullPointerException If consumer is null
++ * @return A cancellable which will control the execution of the specified consumer
++ */
+ public Cancellable addAsynchronousWaiter(final BiConsumer consumer) {
+ if (this.waiters.add(consumer)) {
+ return new CancellableImpl(consumer);
@@ -1635,6 +1662,13 @@ index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1c
+ }
+ }
+
++ /**
++ * Adds a waiter that will be completed asynchronously by the complete() calls. If complete()
++ * has already been called, then invokes the consumer synchronously with the completed result.
++ * @param consumer Consumer to be executed on completion
++ * @throws NullPointerException If consumer is null
++ * @return A cancellable which will control the execution of the specified consumer
++ */
+ public Cancellable addWaiter(final BiConsumer consumer) {
+ if (this.waiters.add(consumer)) {
+ return new CancellableImpl(consumer);
@@ -1672,16 +1706,32 @@ index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1c
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java
new file mode 100644
-index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee09915c328ae6
+index 0000000000000000000000000000000000000000..18d646676fd022afd64afaac30ec1bd283a73b0e
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java
-@@ -0,0 +1,198 @@
+@@ -0,0 +1,208 @@
+package ca.spottedleaf.concurrentutil.executor;
+
-+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import java.util.function.BooleanSupplier;
+
++/**
++ * Base implementation for an abstract queue of tasks which are executed either synchronously or asynchronously.
++ *
++ *
++ * The implementation supports tracking task executions using {@link #getTotalTasksScheduled()} and
++ * {@link #getTotalTasksExecuted()}, and optionally shutting down the executor using {@link #shutdown()}
++ *
++ *
++ *
++ * The base implementation does not provide a method to queue a task for execution, rather that is specified in
++ * the specific implementation. However, it is required that a specific implementation provides a method to
++ * queue a task or create a task. A queued task is one which will eventually be executed,
++ * and a created task must be queued to execute via {@link BaseTask#queue()} or be executed manually via
++ * {@link BaseTask#execute()}. This choice of delaying the queueing of a task may be useful to provide a task handle
++ * which may be cancelled or adjusted before the actual real task logic is ready to be executed.
++ *
++ */
+public interface BaseExecutor {
+
+ /**
@@ -1710,7 +1760,6 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+ */
+ public long getTotalTasksExecuted();
+
-+
+ /**
+ * Waits until this queue has had all of its tasks executed (NOT removed). See {@link #haveAllTasksExecuted()}
+ *
@@ -1723,7 +1772,7 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+ * time than expected. Effectively, do not rely on this call being fast - even if there are few tasks scheduled.
+ *
+ *
-+ * Note: Interruptions to the the current thread have no effect. Interrupt status is also not affected by this cal.
++ * Note: Interruptions to the current thread have no effect. Interrupt status is also not affected by this call.
+ *
+ *
+ * @throws IllegalStateException If the current thread is not allowed to wait
@@ -1739,17 +1788,6 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+
+ /**
+ * Executes the next available task.
-+ *
-+ * If there is a task with priority {@link PrioritisedExecutor.Priority#BLOCKING} available, then that such task is executed.
-+ *
-+ *
-+ * If there is a task with priority {@link PrioritisedExecutor.Priority#IDLE} available then that task is only executed
-+ * when there are no other tasks available with a higher priority.
-+ *
-+ *
-+ * If there are no tasks that have priority {@link PrioritisedExecutor.Priority#BLOCKING} or {@link PrioritisedExecutor.Priority#IDLE}, then
-+ * this function will be biased to execute tasks that have higher priorities.
-+ *
+ *
+ * @return {@code true} if a task was executed, {@code false} otherwise
+ * @throws IllegalStateException If the current thread is not allowed to execute a task
@@ -1791,12 +1829,12 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+ }
+
+ /**
-+ * Waits and executes tasks until the condition returns {@code true} or {@code System.nanoTime() >= deadline}.
++ * Waits and executes tasks until the condition returns {@code true} or {@code System.nanoTime() - deadline >= 0}.
+ */
+ public default void executeConditionally(final BooleanSupplier condition, final long deadline) {
+ long failures = 0;
+ // double check deadline; we don't know how expensive the condition is
-+ while ((System.nanoTime() < deadline) && !condition.getAsBoolean() && (System.nanoTime() < deadline)) {
++ while ((System.nanoTime() - deadline < 0L) && !condition.getAsBoolean() && (System.nanoTime() - deadline < 0L)) {
+ if (this.executeTask()) {
+ failures = failures >>> 2;
+ } else {
@@ -1806,11 +1844,11 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+ }
+
+ /**
-+ * Waits and executes tasks until {@code System.nanoTime() >= deadline}.
++ * Waits and executes tasks until {@code System.nanoTime() - deadline >= 0}.
+ */
+ public default void executeUntil(final long deadline) {
+ long failures = 0;
-+ while (System.nanoTime() < deadline) {
++ while (System.nanoTime() - deadline < 0L) {
+ if (this.executeTask()) {
+ failures = failures >>> 2;
+ } else {
@@ -1831,6 +1869,7 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+ *
+ * @return {@code true} if the queue was shutdown, {@code false} if it has shut down already
+ * @throws UnsupportedOperationException If this queue does not support shutdown
++ * @see #isShutdown()
+ */
+ public default boolean shutdown() throws UnsupportedOperationException {
+ throw new UnsupportedOperationException();
@@ -1838,13 +1877,18 @@ index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee0991
+
+ /**
+ * Returns whether this queue has shut down. Effectively, whether new tasks will be rejected - this method
-+ * does not indicate whether all of the tasks scheduled have been executed.
++ * does not indicate whether all the tasks scheduled have been executed.
+ * @return Returns whether this queue has shut down.
++ * @see #waitUntilAllExecuted()
+ */
+ public default boolean isShutdown() {
+ return false;
+ }
+
++ /**
++ * Task object returned for any {@link BaseExecutor} scheduled task.
++ * @see BaseExecutor
++ */
+ public static interface BaseTask extends Cancellable {
+
+ /**
@@ -2072,14 +2116,18 @@ index 0000000000000000000000000000000000000000..3ce10053d4ec51855ad7012abb5d97df
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedExecutor.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedExecutor.java
new file mode 100644
-index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a718bf55b3
+index 0000000000000000000000000000000000000000..91beb6f23f257cf265fe3150f760892e605f217a
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedExecutor.java
-@@ -0,0 +1,246 @@
+@@ -0,0 +1,276 @@
+package ca.spottedleaf.concurrentutil.executor.standard;
+
+import ca.spottedleaf.concurrentutil.executor.BaseExecutor;
+
++/**
++ * Implementation of {@link BaseExecutor} which schedules tasks to be executed by a given priority.
++ * @see BaseExecutor
++ */
+public interface PrioritisedExecutor extends BaseExecutor {
+
+ public static enum Priority {
@@ -2141,12 +2189,12 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ }
+
+ // returns the higher priority of the two
-+ public static PrioritisedExecutor.Priority max(final Priority p1, final Priority p2) {
++ public static Priority max(final Priority p1, final Priority p2) {
+ return p1.isHigherOrEqualPriority(p2) ? p1 : p2;
+ }
+
+ // returns the lower priroity of the two
-+ public static PrioritisedExecutor.Priority min(final Priority p1, final Priority p2) {
++ public static Priority min(final Priority p1, final Priority p2) {
+ return p1.isLowerOrEqualPriority(p2) ? p1 : p2;
+ }
+
@@ -2198,14 +2246,14 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ return priority > than;
+ }
+
-+ static final PrioritisedExecutor.Priority[] PRIORITIES = PrioritisedExecutor.Priority.values();
++ static final Priority[] PRIORITIES = Priority.values();
+
+ /** includes special priorities */
+ public static final int TOTAL_PRIORITIES = PRIORITIES.length;
+
+ public static final int TOTAL_SCHEDULABLE_PRIORITIES = TOTAL_PRIORITIES - 1;
+
-+ public static PrioritisedExecutor.Priority getPriority(final int priority) {
++ public static Priority getPriority(final int priority) {
+ return PRIORITIES[priority + 1];
+ }
+
@@ -2227,6 +2275,26 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ }
+
+ /**
++ * Executes the next available task.
++ *
++ * If there is a task with priority {@link PrioritisedExecutor.Priority#BLOCKING} available, then that task is executed.
++ *
++ *
++ * If there is a task with priority {@link PrioritisedExecutor.Priority#IDLE} available then that task is only executed
++ * when there are no other tasks available with a higher priority.
++ *
++ *
++ * If there are no tasks that have priority {@link PrioritisedExecutor.Priority#BLOCKING} or {@link PrioritisedExecutor.Priority#IDLE}, then
++ * this function will be biased to execute tasks that have higher priorities.
++ *
++ *
++ * @return {@code true} if a task was executed, {@code false} otherwise
++ * @throws IllegalStateException If the current thread is not allowed to execute a task
++ */
++ @Override
++ public boolean executeTask() throws IllegalStateException;
++
++ /**
+ * Queues or executes a task at {@link Priority#NORMAL} priority.
+ * @param task The task to run.
+ *
@@ -2236,7 +2304,7 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * associated with the parameter
+ */
+ public default PrioritisedTask queueRunnable(final Runnable task) {
-+ return this.queueRunnable(task, PrioritisedExecutor.Priority.NORMAL);
++ return this.queueRunnable(task, Priority.NORMAL);
+ }
+
+ /**
@@ -2251,10 +2319,10 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @return {@code null} if the current thread immediately executed the task, else returns the prioritised task
+ * associated with the parameter
+ */
-+ public PrioritisedTask queueRunnable(final Runnable task, final PrioritisedExecutor.Priority priority);
++ public PrioritisedTask queueRunnable(final Runnable task, final Priority priority);
+
+ /**
-+ * Creates, but does not execute or queue the task. The task must later be queued via {@link BaseExecutor.BaseTask#queue()}.
++ * Creates, but does not execute or queue the task. The task must later be queued via {@link BaseTask#queue()}.
+ *
+ * @param task The task to run.
+ *
@@ -2264,12 +2332,12 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @throws UnsupportedOperationException If this executor does not support lazily queueing tasks
+ * @return The prioritised task associated with the parameters
+ */
-+ public default PrioritisedExecutor.PrioritisedTask createTask(final Runnable task) {
-+ return this.createTask(task, PrioritisedExecutor.Priority.NORMAL);
++ public default PrioritisedTask createTask(final Runnable task) {
++ return this.createTask(task, Priority.NORMAL);
+ }
+
+ /**
-+ * Creates, but does not execute or queue the task. The task must later be queued via {@link BaseExecutor.BaseTask#queue()}.
++ * Creates, but does not execute or queue the task. The task must later be queued via {@link BaseTask#queue()}.
+ *
+ * @param task The task to run.
+ * @param priority The priority for the task.
@@ -2280,15 +2348,21 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @throws UnsupportedOperationException If this executor does not support lazily queueing tasks
+ * @return The prioritised task associated with the parameters
+ */
-+ public PrioritisedExecutor.PrioritisedTask createTask(final Runnable task, final PrioritisedExecutor.Priority priority);
++ public PrioritisedTask createTask(final Runnable task, final Priority priority);
+
++ /**
++ * Extension of {@link ca.spottedleaf.concurrentutil.executor.BaseExecutor.BaseTask} which adds functions
++ * to retrieve and modify the task's associated priority.
++ *
++ * @see ca.spottedleaf.concurrentutil.executor.BaseExecutor.BaseTask
++ */
+ public static interface PrioritisedTask extends BaseTask {
+
+ /**
-+ * Returns the current priority. Note that {@link PrioritisedExecutor.Priority#COMPLETING} will be returned
++ * Returns the current priority. Note that {@link Priority#COMPLETING} will be returned
+ * if this task is completing or has completed.
+ */
-+ public PrioritisedExecutor.Priority getPriority();
++ public Priority getPriority();
+
+ /**
+ * Attempts to set this task's priority level to the level specified.
@@ -2299,7 +2373,7 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @return {@code true} if successful, {@code false} if this task is completing or has completed or the queue
+ * this task was scheduled on was shutdown, or if the priority was already at the specified level.
+ */
-+ public boolean setPriority(final PrioritisedExecutor.Priority priority);
++ public boolean setPriority(final Priority priority);
+
+ /**
+ * Attempts to raise the priority to the priority level specified.
@@ -2309,7 +2383,7 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @throws IllegalArgumentException If the priority is invalid
+ * @return {@code false} if the current task is completing, {@code true} if the priority was raised to the specified level or was already at the specified level or higher.
+ */
-+ public boolean raisePriority(final PrioritisedExecutor.Priority priority);
++ public boolean raisePriority(final Priority priority);
+
+ /**
+ * Attempts to lower the priority to the priority level specified.
@@ -2319,20 +2393,20 @@ index 0000000000000000000000000000000000000000..e5d8ff730ba9d83efc2d80782de313a7
+ * @throws IllegalArgumentException If the priority is invalid
+ * @return {@code false} if the current task is completing, {@code true} if the priority was lowered to the specified level or was already at the specified level or lower.
+ */
-+ public boolean lowerPriority(final PrioritisedExecutor.Priority priority);
++ public boolean lowerPriority(final Priority priority);
+ }
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedQueueExecutorThread.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedQueueExecutorThread.java
new file mode 100644
-index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d758340cff
+index 0000000000000000000000000000000000000000..d1683ba6350e530373944f98192c0f2baf241e70
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedQueueExecutorThread.java
-@@ -0,0 +1,297 @@
+@@ -0,0 +1,301 @@
+package ca.spottedleaf.concurrentutil.executor.standard;
+
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
-+import com.mojang.logging.LogUtils;
+import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
+import java.lang.invoke.VarHandle;
+import java.util.concurrent.locks.LockSupport;
+
@@ -2347,14 +2421,14 @@ index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d7
+ */
+public class PrioritisedQueueExecutorThread extends Thread implements PrioritisedExecutor {
+
-+ private static final Logger LOGGER = LogUtils.getLogger();
++ private static final Logger LOGGER = LoggerFactory.getLogger(PrioritisedQueueExecutorThread.class);
+
+ protected final PrioritisedExecutor queue;
+
+ protected volatile boolean threadShutdown;
+
-+ protected static final VarHandle THREAD_PARKED_HANDLE = ConcurrentUtil.getVarHandle(PrioritisedQueueExecutorThread.class, "threadParked", boolean.class);
+ protected volatile boolean threadParked;
++ protected static final VarHandle THREAD_PARKED_HANDLE = ConcurrentUtil.getVarHandle(PrioritisedQueueExecutorThread.class, "threadParked", boolean.class);
+
+ protected volatile boolean halted;
+
@@ -2429,6 +2503,10 @@ index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d7
+ }
+ }
+
++ /**
++ * Attempts to poll as many tasks as possible, returning when finished.
++ * @return Whether any tasks were executed.
++ */
+ protected boolean pollTasks() {
+ boolean ret = false;
+
@@ -2473,7 +2551,7 @@ index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d7
+
+ @Override
+ public PrioritisedTask createTask(final Runnable task, final Priority priority) {
-+ final PrioritisedExecutor.PrioritisedTask queueTask = this.queue.createTask(task, priority);
++ final PrioritisedTask queueTask = this.queue.createTask(task, priority);
+
+ // need to override queue() to notify us of tasks
+ return new PrioritisedTask() {
@@ -2519,8 +2597,8 @@ index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d7
+ }
+
+ @Override
-+ public PrioritisedExecutor.PrioritisedTask queueRunnable(final Runnable task, final PrioritisedExecutor.Priority priority) {
-+ final PrioritisedExecutor.PrioritisedTask ret = this.queue.queueRunnable(task, priority);
++ public PrioritisedTask queueRunnable(final Runnable task, final Priority priority) {
++ final PrioritisedTask ret = this.queue.queueRunnable(task, priority);
+
+ this.notifyTasks();
+
@@ -2627,15 +2705,15 @@ index 0000000000000000000000000000000000000000..91fe0f7049122f62f05ba09c24cba5d7
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadPool.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadPool.java
new file mode 100644
-index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4265740e0
+index 0000000000000000000000000000000000000000..2ba36e29d0d8693f2f5e6c6d195ca27f2a5099aa
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadPool.java
-@@ -0,0 +1,579 @@
+@@ -0,0 +1,632 @@
+package ca.spottedleaf.concurrentutil.executor.standard;
+
-+import com.mojang.logging.LogUtils;
+import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
+import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
@@ -2645,30 +2723,46 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+
+public final class PrioritisedThreadPool {
+
-+ private static final Logger LOGGER = LogUtils.getLogger();
++ private static final Logger LOGGER = LoggerFactory.getLogger(PrioritisedThreadPool.class);
+
-+ protected final PrioritisedThread[] threads;
-+ protected final TreeSet queues = new TreeSet<>(PrioritisedPoolExecutorImpl.comparator());
-+ protected final String name;
-+ protected final long queueMaxHoldTime;
++ private final PrioritisedThread[] threads;
++ private final TreeSet queues = new TreeSet<>(PrioritisedPoolExecutorImpl.comparator());
++ private final String name;
++ private final long queueMaxHoldTime;
+
-+ protected final ReferenceOpenHashSet nonShutdownQueues = new ReferenceOpenHashSet<>();
-+ protected final ReferenceOpenHashSet activeQueues = new ReferenceOpenHashSet<>();
++ private final ReferenceOpenHashSet nonShutdownQueues = new ReferenceOpenHashSet<>();
++ private final ReferenceOpenHashSet activeQueues = new ReferenceOpenHashSet<>();
+
-+ protected boolean shutdown;
++ private boolean shutdown;
+
-+ protected long schedulingIdGenerator;
++ private long schedulingIdGenerator;
+
-+ protected static final long DEFAULT_QUEUE_HOLD_TIME = (long)(5.0e6);
++ private static final long DEFAULT_QUEUE_HOLD_TIME = (long)(5.0e6);
+
++ /**
++ * @param name Specified debug name of this thread pool
++ * @param threads The number of threads to use
++ */
+ public PrioritisedThreadPool(final String name, final int threads) {
+ this(name, threads, null);
+ }
+
++ /**
++ * @param name Specified debug name of this thread pool
++ * @param threads The number of threads to use
++ * @param threadModifier Invoked for each created thread with its incremental id before starting them
++ */
+ public PrioritisedThreadPool(final String name, final int threads, final BiConsumer threadModifier) {
+ this(name, threads, threadModifier, DEFAULT_QUEUE_HOLD_TIME); // 5ms
+ }
+
++ /**
++ * @param name Specified debug name of this thread pool
++ * @param threads The number of threads to use
++ * @param threadModifier Invoked for each created thread with its incremental id before starting them
++ * @param queueHoldTime The maximum amount of time to spend executing tasks in a specific queue before attempting
++ * to switch to another queue, per thread
++ */
+ public PrioritisedThreadPool(final String name, final int threads, final BiConsumer threadModifier,
+ final long queueHoldTime) { // in ns
+ if (threads <= 0) {
@@ -2700,16 +2794,32 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+ }
+ }
+
++ /**
++ * Returns an array representing the threads backing this thread pool.
++ */
+ public Thread[] getThreads() {
+ return Arrays.copyOf(this.threads, this.threads.length, Thread[].class);
+ }
+
-+ public PrioritisedPoolExecutor createExecutor(final String name, final int parallelism) {
++ /**
++ * Creates and returns a {@link PrioritisedPoolExecutor} to schedule tasks onto. The returned executor will execute
++ * tasks on this thread pool only.
++ * @param name The debug name of the executor.
++ * @param minParallelism The minimum number of threads to be executing tasks from the returned executor
++ * before threads may be allocated to other queues in this thread pool.
++ * @param parallelism The maximum number of threads which may be executing tasks from the returned executor.
++ * @throws IllegalStateException If this thread pool is shut down
++ */
++ public PrioritisedPoolExecutor createExecutor(final String name, final int minParallelism, final int parallelism) {
+ synchronized (this.nonShutdownQueues) {
+ if (this.shutdown) {
+ throw new IllegalStateException("Queue is shutdown: " + this.toString());
+ }
-+ final PrioritisedPoolExecutorImpl ret = new PrioritisedPoolExecutorImpl(this, name, Math.min(Math.max(1, parallelism), this.threads.length));
++ final PrioritisedPoolExecutorImpl ret = new PrioritisedPoolExecutorImpl(
++ this, name,
++ Math.min(Math.max(1, parallelism), this.threads.length),
++ Math.min(Math.max(0, minParallelism), this.threads.length)
++ );
+
+ this.nonShutdownQueues.add(ret);
+
@@ -2805,6 +2915,12 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+ }
+ }
+
++ /**
++ * Shuts down this thread pool, optionally waiting for all tasks to be executed.
++ * This function will invoke {@link PrioritisedPoolExecutor#shutdown()} on all created executors on this
++ * thread pool.
++ * @param wait Whether to wait for tasks to be executed
++ */
+ public void shutdown(final boolean wait) {
+ final ArrayList queuesToShutdown;
+ synchronized (this.nonShutdownQueues) {
@@ -2965,12 +3081,14 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+
+ protected final String name;
+ protected final int maximumExecutors;
++ protected final int minimumExecutors;
+ protected boolean isQueued;
+
-+ public PrioritisedPoolExecutorImpl(final PrioritisedThreadPool pool, final String name, final int maximumExecutors) {
++ public PrioritisedPoolExecutorImpl(final PrioritisedThreadPool pool, final String name, final int maximumExecutors, final int minimumExecutors) {
+ this.pool = pool;
+ this.name = name;
+ this.maximumExecutors = maximumExecutors;
++ this.minimumExecutors = minimumExecutors;
+ }
+
+ public static Comparator comparator() {
@@ -2979,6 +3097,19 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+ return 0;
+ }
+
++ final int belowMin1 = p1.minimumExecutors - p1.concurrentExecutors;
++ final int belowMin2 = p2.minimumExecutors - p2.concurrentExecutors;
++
++ // test minimum executors
++ if (belowMin1 > 0 || belowMin2 > 0) {
++ // want the largest belowMin to be first
++ final int minCompare = Integer.compare(belowMin2, belowMin1);
++
++ if (minCompare != 0) {
++ return minCompare;
++ }
++ }
++
+ // prefer higher priority
+ final int priorityCompare = p1.scheduledPriority.ordinal() - p2.scheduledPriority.ordinal();
+ if (priorityCompare != 0) {
@@ -3212,7 +3343,7 @@ index 0000000000000000000000000000000000000000..26fa2caa18a9194e57574a4a7fa9f7a4
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadedTaskQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadedTaskQueue.java
new file mode 100644
-index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e94d6c73d3
+index 0000000000000000000000000000000000000000..3e8401b1b1f833c4f01bc87059a2f48d761d989f
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/standard/PrioritisedThreadedTaskQueue.java
@@ -0,0 +1,378 @@
@@ -3239,8 +3370,8 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ protected long taskIdGenerator = 0;
+
+ @Override
-+ public PrioritisedExecutor.PrioritisedTask queueRunnable(final Runnable task, final PrioritisedExecutor.Priority priority) throws IllegalStateException, IllegalArgumentException {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ public PrioritisedExecutor.PrioritisedTask queueRunnable(final Runnable task, final Priority priority) throws IllegalStateException, IllegalArgumentException {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Priority " + priority + " is invalid");
+ }
+ if (task == null) {
@@ -3273,7 +3404,7 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+
+ @Override
+ public PrioritisedExecutor.PrioritisedTask createTask(final Runnable task, final Priority priority) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Priority " + priority + " is invalid");
+ }
+ if (task == null) {
@@ -3305,7 +3436,7 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ return this.poll(Priority.IDLE);
+ }
+
-+ protected PrioritisedTask poll(final PrioritisedExecutor.Priority minPriority) {
++ protected PrioritisedTask poll(final Priority minPriority) {
+ final ArrayDeque[] queues = this.queues;
+ synchronized (queues) {
+ final int max = minPriority.priority;
@@ -3382,10 +3513,10 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ protected static final long NOT_SCHEDULED_ID = -1L;
+
+ protected Runnable runnable;
-+ protected volatile PrioritisedExecutor.Priority priority;
++ protected volatile Priority priority;
+
-+ protected PrioritisedTask(final long id, final Runnable runnable, final PrioritisedExecutor.Priority priority, final PrioritisedThreadedTaskQueue queue) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ protected PrioritisedTask(final long id, final Runnable runnable, final Priority priority, final PrioritisedThreadedTaskQueue queue) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
@@ -3395,8 +3526,8 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ this.id = id;
+ }
+
-+ protected PrioritisedTask(final Runnable runnable, final PrioritisedExecutor.Priority priority, final PrioritisedThreadedTaskQueue queue) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ protected PrioritisedTask(final Runnable runnable, final Priority priority, final PrioritisedThreadedTaskQueue queue) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
@@ -3417,8 +3548,8 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ throw new IllegalStateException("Queue has shutdown");
+ }
+
-+ final PrioritisedExecutor.Priority priority = this.priority;
-+ if (priority == PrioritisedExecutor.Priority.COMPLETING) {
++ final Priority priority = this.priority;
++ if (priority == Priority.COMPLETING) {
+ return false;
+ }
+
@@ -3437,11 +3568,11 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ }
+
+ protected boolean trySetCompleting(final int minPriority) {
-+ final PrioritisedExecutor.Priority oldPriority = this.priority;
-+ if (oldPriority != PrioritisedExecutor.Priority.COMPLETING && oldPriority.isHigherOrEqualPriority(minPriority)) {
-+ this.priority = PrioritisedExecutor.Priority.COMPLETING;
++ final Priority oldPriority = this.priority;
++ if (oldPriority != Priority.COMPLETING && oldPriority.isHigherOrEqualPriority(minPriority)) {
++ this.priority = Priority.COMPLETING;
+ if (this.id != NOT_SCHEDULED_ID) {
-+ this.queue.priorityChange(this, oldPriority, PrioritisedExecutor.Priority.COMPLETING);
++ this.queue.priorityChange(this, oldPriority, Priority.COMPLETING);
+ }
+ return true;
+ }
@@ -3450,19 +3581,19 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ }
+
+ @Override
-+ public PrioritisedExecutor.Priority getPriority() {
++ public Priority getPriority() {
+ return this.priority;
+ }
+
+ @Override
-+ public boolean setPriority(final PrioritisedExecutor.Priority priority) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ public boolean setPriority(final Priority priority) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ synchronized (this.queue.queues) {
-+ final PrioritisedExecutor.Priority curr = this.priority;
++ final Priority curr = this.priority;
+
-+ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ if (curr == Priority.COMPLETING) {
+ return false;
+ }
+
@@ -3483,15 +3614,15 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ }
+
+ @Override
-+ public boolean raisePriority(final PrioritisedExecutor.Priority priority) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ public boolean raisePriority(final Priority priority) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ synchronized (this.queue.queues) {
-+ final PrioritisedExecutor.Priority curr = this.priority;
++ final Priority curr = this.priority;
+
-+ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ if (curr == Priority.COMPLETING) {
+ return false;
+ }
+
@@ -3512,15 +3643,15 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ }
+
+ @Override
-+ public boolean lowerPriority(final PrioritisedExecutor.Priority priority) {
-+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ public boolean lowerPriority(final Priority priority) {
++ if (!Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ synchronized (this.queue.queues) {
-+ final PrioritisedExecutor.Priority curr = this.priority;
++ final Priority curr = this.priority;
+
-+ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ if (curr == Priority.COMPLETING) {
+ return false;
+ }
+
@@ -3545,14 +3676,14 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ final long id;
+ synchronized (this.queue.queues) {
+ final Priority oldPriority = this.priority;
-+ if (oldPriority == PrioritisedExecutor.Priority.COMPLETING) {
++ if (oldPriority == Priority.COMPLETING) {
+ return false;
+ }
+
-+ this.priority = PrioritisedExecutor.Priority.COMPLETING;
++ this.priority = Priority.COMPLETING;
+ // call priority change callback
+ if ((id = this.id) != NOT_SCHEDULED_ID) {
-+ this.queue.priorityChange(this, oldPriority, PrioritisedExecutor.Priority.COMPLETING);
++ this.queue.priorityChange(this, oldPriority, Priority.COMPLETING);
+ }
+ }
+ this.runnable = null;
@@ -3578,14 +3709,14 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ public boolean execute() {
+ synchronized (this.queue.queues) {
+ final Priority oldPriority = this.priority;
-+ if (oldPriority == PrioritisedExecutor.Priority.COMPLETING) {
++ if (oldPriority == Priority.COMPLETING) {
+ return false;
+ }
+
-+ this.priority = PrioritisedExecutor.Priority.COMPLETING;
++ this.priority = Priority.COMPLETING;
+ // call priority change callback
+ if (this.id != NOT_SCHEDULED_ID) {
-+ this.queue.priorityChange(this, oldPriority, PrioritisedExecutor.Priority.COMPLETING);
++ this.queue.priorityChange(this, oldPriority, Priority.COMPLETING);
+ }
+ }
+
@@ -3594,19 +3725,2090 @@ index 0000000000000000000000000000000000000000..b71404be2c82f7db35272b367af861e9
+ }
+ }
+}
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java
new file mode 100644
-index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b9a40f9f5
+index 0000000000000000000000000000000000000000..94bfd7c56ffcea7d6491e94a7804bc3bd60fe9c3
--- /dev/null
-+++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
-@@ -0,0 +1,1673 @@
++++ b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java
+@@ -0,0 +1,8 @@
++package ca.spottedleaf.concurrentutil.function;
++
++@FunctionalInterface
++public interface BiLong1Function<T, R> {
++
++ public R apply(final long t1, final T t2);
++
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..8e7eef07960a18d0593688eba55adfa1c85efadf
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java
+@@ -0,0 +1,8 @@
++package ca.spottedleaf.concurrentutil.function;
++
++@FunctionalInterface
++public interface BiLongObjectConsumer<V> {
++
++ public void accept(final long key, final V value);
++
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..7ffe4379b06c03c56abbcbdee3bb720894a10702
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java
+@@ -0,0 +1,350 @@
++package ca.spottedleaf.concurrentutil.lock;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.map.ConcurrentLong2ReferenceChainedHashTable;
++import ca.spottedleaf.concurrentutil.util.IntPairUtil;
++import java.util.Objects;
++import java.util.concurrent.locks.LockSupport;
++
++public final class ReentrantAreaLock {
++
++ public final int coordinateShift;
++
++ // aggressive load factor to reduce contention
++ private final ConcurrentLong2ReferenceChainedHashTable<Node> nodes = ConcurrentLong2ReferenceChainedHashTable.createWithCapacity(128, 0.2f);
++
++ public ReentrantAreaLock(final int coordinateShift) {
++ this.coordinateShift = coordinateShift;
++ }
++
++ public boolean isHeldByCurrentThread(final int x, final int z) {
++ final Thread currThread = Thread.currentThread();
++ final int shift = this.coordinateShift;
++ final int sectionX = x >> shift;
++ final int sectionZ = z >> shift;
++
++ final long coordinate = IntPairUtil.key(sectionX, sectionZ);
++ final Node node = this.nodes.get(coordinate);
++
++ return node != null && node.thread == currThread;
++ }
++
++ public boolean isHeldByCurrentThread(final int centerX, final int centerZ, final int radius) {
++ return this.isHeldByCurrentThread(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
++ }
++
++ public boolean isHeldByCurrentThread(final int fromX, final int fromZ, final int toX, final int toZ) {
++ if (fromX > toX || fromZ > toZ) {
++ throw new IllegalArgumentException();
++ }
++
++ final Thread currThread = Thread.currentThread();
++ final int shift = this.coordinateShift;
++ final int fromSectionX = fromX >> shift;
++ final int fromSectionZ = fromZ >> shift;
++ final int toSectionX = toX >> shift;
++ final int toSectionZ = toZ >> shift;
++
++ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
++ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
++ final long coordinate = IntPairUtil.key(currX, currZ);
++
++ final Node node = this.nodes.get(coordinate);
++
++ if (node == null || node.thread != currThread) {
++ return false;
++ }
++ }
++ }
++
++ return true;
++ }
++
++ public Node tryLock(final int x, final int z) {
++ return this.tryLock(x, z, x, z);
++ }
++
++ public Node tryLock(final int centerX, final int centerZ, final int radius) {
++ return this.tryLock(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
++ }
++
++ public Node tryLock(final int fromX, final int fromZ, final int toX, final int toZ) {
++ if (fromX > toX || fromZ > toZ) {
++ throw new IllegalArgumentException();
++ }
++
++ final Thread currThread = Thread.currentThread();
++ final int shift = this.coordinateShift;
++ final int fromSectionX = fromX >> shift;
++ final int fromSectionZ = fromZ >> shift;
++ final int toSectionX = toX >> shift;
++ final int toSectionZ = toZ >> shift;
++
++ final long[] areaAffected = new long[(toSectionX - fromSectionX + 1) * (toSectionZ - fromSectionZ + 1)];
++ int areaAffectedLen = 0;
++
++ final Node ret = new Node(this, areaAffected, currThread);
++
++ boolean failed = false;
++
++ // try to fast acquire area
++ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
++ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
++ final long coordinate = IntPairUtil.key(currX, currZ);
++
++ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
++
++ if (prev == null) {
++ areaAffected[areaAffectedLen++] = coordinate;
++ continue;
++ }
++
++ if (prev.thread != currThread) {
++ failed = true;
++ break;
++ }
++ }
++ }
++
++ if (!failed) {
++ return ret;
++ }
++
++ // failed, undo logic
++ if (areaAffectedLen != 0) {
++ for (int i = 0; i < areaAffectedLen; ++i) {
++ final long key = areaAffected[i];
++
++ if (this.nodes.remove(key) != ret) {
++ throw new IllegalStateException();
++ }
++ }
++
++ areaAffectedLen = 0;
++
++ // since we inserted, we need to drain waiters
++ Thread unpark;
++ while ((unpark = ret.pollOrBlockAdds()) != null) {
++ LockSupport.unpark(unpark);
++ }
++ }
++
++ return null;
++ }
++
++ public Node lock(final int x, final int z) {
++ final Thread currThread = Thread.currentThread();
++ final int shift = this.coordinateShift;
++ final int sectionX = x >> shift;
++ final int sectionZ = z >> shift;
++
++ final long coordinate = IntPairUtil.key(sectionX, sectionZ);
++ final long[] areaAffected = new long[1];
++ areaAffected[0] = coordinate;
++
++ final Node ret = new Node(this, areaAffected, currThread);
++
++ for (long failures = 0L;;) {
++ final Node park;
++
++ // try to fast acquire area
++ {
++ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
++
++ if (prev == null) {
++ ret.areaAffectedLen = 1;
++ return ret;
++ } else if (prev.thread != currThread) {
++ park = prev;
++ } else {
++ // only one node we would want to acquire, and it's owned by this thread already
++ // areaAffectedLen = 0 already
++ return ret;
++ }
++ }
++
++ ++failures;
++
++ if (failures > 128L && park.add(currThread)) {
++ LockSupport.park();
++ } else {
++ // high contention, spin wait
++ if (failures < 128L) {
++ for (long i = 0; i < failures; ++i) {
++ Thread.onSpinWait();
++ }
++ failures = failures << 1;
++ } else if (failures < 1_200L) {
++ LockSupport.parkNanos(1_000L);
++ failures = failures + 1L;
++ } else { // scale 0.1ms (100us) per failure
++ Thread.yield();
++ LockSupport.parkNanos(100_000L * failures);
++ failures = failures + 1L;
++ }
++ }
++ }
++ }
++
++ public Node lock(final int centerX, final int centerZ, final int radius) {
++ return this.lock(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
++ }
++
++ public Node lock(final int fromX, final int fromZ, final int toX, final int toZ) {
++ if (fromX > toX || fromZ > toZ) {
++ throw new IllegalArgumentException();
++ }
++
++ final Thread currThread = Thread.currentThread();
++ final int shift = this.coordinateShift;
++ final int fromSectionX = fromX >> shift;
++ final int fromSectionZ = fromZ >> shift;
++ final int toSectionX = toX >> shift;
++ final int toSectionZ = toZ >> shift;
++
++ if (((fromSectionX ^ toSectionX) | (fromSectionZ ^ toSectionZ)) == 0) {
++ return this.lock(fromX, fromZ);
++ }
++
++ final long[] areaAffected = new long[(toSectionX - fromSectionX + 1) * (toSectionZ - fromSectionZ + 1)];
++ int areaAffectedLen = 0;
++
++ final Node ret = new Node(this, areaAffected, currThread);
++
++ for (long failures = 0L;;) {
++ Node park = null;
++ boolean addedToArea = false;
++ boolean alreadyOwned = false;
++ boolean allOwned = true;
++
++ // try to fast acquire area
++ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
++ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
++ final long coordinate = IntPairUtil.key(currX, currZ);
++
++ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
++
++ if (prev == null) {
++ addedToArea = true;
++ allOwned = false;
++ areaAffected[areaAffectedLen++] = coordinate;
++ continue;
++ }
++
++ if (prev.thread != currThread) {
++ park = prev;
++ alreadyOwned = true;
++ break;
++ }
++ }
++ }
++
++ // check for failure
++ if ((park != null && addedToArea) || (park == null && alreadyOwned && !allOwned)) {
++ // failure to acquire: added and we need to block, or improper lock usage
++ for (int i = 0; i < areaAffectedLen; ++i) {
++ final long key = areaAffected[i];
++
++ if (this.nodes.remove(key) != ret) {
++ throw new IllegalStateException();
++ }
++ }
++
++ areaAffectedLen = 0;
++
++ // since we inserted, we need to drain waiters
++ Thread unpark;
++ while ((unpark = ret.pollOrBlockAdds()) != null) {
++ LockSupport.unpark(unpark);
++ }
++ }
++
++ if (park == null) {
++ if (alreadyOwned && !allOwned) {
++ throw new IllegalStateException("Improper lock usage: Should never acquire intersecting areas");
++ }
++ ret.areaAffectedLen = areaAffectedLen;
++ return ret;
++ }
++
++ // failed
++
++ ++failures;
++
++ if (failures > 128L && park.add(currThread)) {
++ LockSupport.park(park);
++ } else {
++ // high contention, spin wait
++ if (failures < 128L) {
++ for (long i = 0; i < failures; ++i) {
++ Thread.onSpinWait();
++ }
++ failures = failures << 1;
++ } else if (failures < 1_200L) {
++ LockSupport.parkNanos(1_000L);
++ failures = failures + 1L;
++ } else { // scale 0.1ms (100us) per failure
++ Thread.yield();
++ LockSupport.parkNanos(100_000L * failures);
++ failures = failures + 1L;
++ }
++ }
++
++ if (addedToArea) {
++ // try again, so we need to allow adds so that other threads can properly block on us
++ ret.allowAdds();
++ }
++ }
++ }
++
++ public void unlock(final Node node) {
++ if (node.lock != this) {
++ throw new IllegalStateException("Unlock target lock mismatch");
++ }
++
++ final long[] areaAffected = node.areaAffected;
++ final int areaAffectedLen = node.areaAffectedLen;
++
++ if (areaAffectedLen == 0) {
++ // here we are not in the node map, and so do not need to remove from the node map or unblock any waiters
++ return;
++ }
++
++ Objects.checkFromToIndex(0, areaAffectedLen, areaAffected.length);
++
++ // remove from node map; allowing other threads to lock
++ for (int i = 0; i < areaAffectedLen; ++i) {
++ final long coordinate = areaAffected[i];
++ if (this.nodes.remove(coordinate, node) != node) {
++ throw new IllegalStateException();
++ }
++ }
++
++ Thread unpark;
++ while ((unpark = node.pollOrBlockAdds()) != null) {
++ LockSupport.unpark(unpark);
++ }
++ }
++
++ public static final class Node extends MultiThreadedQueue<Thread> {
++
++ private final ReentrantAreaLock lock;
++ private final long[] areaAffected;
++ private int areaAffectedLen;
++ private final Thread thread;
++
++ private Node(final ReentrantAreaLock lock, final long[] areaAffected, final Thread thread) {
++ this.lock = lock;
++ this.areaAffected = areaAffected;
++ this.thread = thread;
++ }
++
++ @Override
++ public String toString() {
++ return "Node{" +
++ "areaAffected=" + IntPairUtil.toString(this.areaAffected, 0, this.areaAffectedLen) +
++ ", thread=" + this.thread +
++ '}';
++ }
++ }
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..6abee91e0d83c6a172e890bbda304a512cf790a1
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java
+@@ -0,0 +1,1681 @@
++package ca.spottedleaf.concurrentutil.map;
++
++import ca.spottedleaf.concurrentutil.function.BiLong1Function;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.HashUtil;
++import ca.spottedleaf.concurrentutil.util.IntegerUtil;
++import ca.spottedleaf.concurrentutil.util.ThrowUtil;
++import ca.spottedleaf.concurrentutil.util.Validate;
++
++import java.lang.invoke.VarHandle;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.NoSuchElementException;
++import java.util.PrimitiveIterator;
++import java.util.concurrent.atomic.LongAdder;
++import java.util.function.BiFunction;
++import java.util.function.Consumer;
++import java.util.function.Function;
++import java.util.function.LongConsumer;
++import java.util.function.LongFunction;
++import java.util.function.Predicate;
++
++/**
++ * Concurrent hashtable implementation supporting mapping arbitrary {@code long} values onto non-null {@code Object}
++ * values with support for multiple writer and multiple reader threads.
++ *
++ * Happens-before relationship
++ *
++ * As with {@link java.util.concurrent.ConcurrentMap}, there is a happens-before relationship between actions in one thread
++ * prior to writing to the map and access to the results of those actions in another thread.
++ *
++ *
++ * Atomicity of functional methods
++ *
++ * Functional methods are functions declared in this class which possibly perform a write (remove, replace, or modify)
++ * to an entry in this map as a result of invoking a function on an input parameter. For example, {@link #compute(long, BiLong1Function)},
++ * {@link #merge(long, Object, BiFunction)} and {@link #removeIf(long, Predicate)} are examples of functional methods.
++ * Functional methods will be performed atomically, that is, the input parameter is guaranteed to only be invoked at most
++ * once per function call. The consequence of this behavior however is that a critical lock for a bin entry is held, which
++ * means that if the input parameter invocation makes additional calls to write into this hash table that the result
++ * is undefined and deadlock-prone.
++ *
++ *
++ * @param <V>
++ * @see java.util.concurrent.ConcurrentMap
++ */
++public class ConcurrentLong2ReferenceChainedHashTable<V> {
++
++ protected static final int DEFAULT_CAPACITY = 16;
++ protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
++ protected static final int MAXIMUM_CAPACITY = Integer.MIN_VALUE >>> 1;
++
++ protected final LongAdder size = new LongAdder();
++ protected final float loadFactor;
++
++ protected volatile TableEntry<V>[] table;
++
++ protected static final int THRESHOLD_NO_RESIZE = -1;
++ protected static final int THRESHOLD_RESIZING = -2;
++ protected volatile int threshold;
++ protected static final VarHandle THRESHOLD_HANDLE = ConcurrentUtil.getVarHandle(ConcurrentLong2ReferenceChainedHashTable.class, "threshold", int.class);
++
++ protected final int getThresholdAcquire() {
++ return (int)THRESHOLD_HANDLE.getAcquire(this);
++ }
++
++ protected final int getThresholdVolatile() {
++ return (int)THRESHOLD_HANDLE.getVolatile(this);
++ }
++
++ protected final void setThresholdPlain(final int threshold) {
++ THRESHOLD_HANDLE.set(this, threshold);
++ }
++
++ protected final void setThresholdRelease(final int threshold) {
++ THRESHOLD_HANDLE.setRelease(this, threshold);
++ }
++
++ protected final void setThresholdVolatile(final int threshold) {
++ THRESHOLD_HANDLE.setVolatile(this, threshold);
++ }
++
++ protected final int compareExchangeThresholdVolatile(final int expect, final int update) {
++ return (int)THRESHOLD_HANDLE.compareAndExchange(this, expect, update);
++ }
++
++ public ConcurrentLong2ReferenceChainedHashTable() {
++ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
++ }
++
++ protected static int getTargetThreshold(final int capacity, final float loadFactor) {
++ final double ret = (double)capacity * (double)loadFactor;
++ if (Double.isInfinite(ret) || ret >= ((double)Integer.MAX_VALUE)) {
++ return THRESHOLD_NO_RESIZE;
++ }
++
++ return (int)Math.ceil(ret);
++ }
++
++ protected static int getCapacityFor(final int capacity) {
++ if (capacity <= 0) {
++ throw new IllegalArgumentException("Invalid capacity: " + capacity);
++ }
++ if (capacity >= MAXIMUM_CAPACITY) {
++ return MAXIMUM_CAPACITY;
++ }
++ return IntegerUtil.roundCeilLog2(capacity);
++ }
++
++ protected ConcurrentLong2ReferenceChainedHashTable(final int capacity, final float loadFactor) {
++ final int tableSize = getCapacityFor(capacity);
++
++ if (loadFactor <= 0.0 || !Float.isFinite(loadFactor)) {
++ throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
++ }
++
++ if (tableSize == MAXIMUM_CAPACITY) {
++ this.setThresholdPlain(THRESHOLD_NO_RESIZE);
++ } else {
++ this.setThresholdPlain(getTargetThreshold(tableSize, loadFactor));
++ }
++
++ this.loadFactor = loadFactor;
++ // noinspection unchecked
++ this.table = (TableEntry<V>[])new TableEntry[tableSize];
++ }
++
++ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithCapacity(final int capacity) {
++ return createWithCapacity(capacity, DEFAULT_LOAD_FACTOR);
++ }
++
++ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithCapacity(final int capacity, final float loadFactor) {
++ return new ConcurrentLong2ReferenceChainedHashTable<>(capacity, loadFactor);
++ }
++
++ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithExpected(final int expected) {
++ return createWithExpected(expected, DEFAULT_LOAD_FACTOR);
++ }
++
++ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithExpected(final int expected, final float loadFactor) {
++ final int capacity = (int)Math.ceil((double)expected / (double)loadFactor);
++
++ return createWithCapacity(capacity, loadFactor);
++ }
++
++ /** must be deterministic given a key */
++ protected static int getHash(final long key) {
++ return (int)HashUtil.mix(key);
++ }
++
++ /**
++ * Returns the load factor associated with this map.
++ */
++ public final float getLoadFactor() {
++ return this.loadFactor;
++ }
++
++ protected static <V> TableEntry<V> getAtIndexVolatile(final TableEntry<V>[] table, final int index) {
++ //noinspection unchecked
++ return (TableEntry<V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getVolatile(table, index);
++ }
++
++ protected static <V> void setAtIndexRelease(final TableEntry<V>[] table, final int index, final TableEntry<V> value) {
++ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
++ }
++
++ protected static <V> void setAtIndexVolatile(final TableEntry<V>[] table, final int index, final TableEntry<V> value) {
++ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setVolatile(table, index, value);
++ }
++
++ protected static <V> TableEntry<V> compareAndExchangeAtIndexVolatile(final TableEntry<V>[] table, final int index,
++ final TableEntry<V> expect, final TableEntry<V> update) {
++ //noinspection unchecked
++ return (TableEntry<V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.compareAndExchange(table, index, expect, update);
++ }
++
++ /**
++ * Returns the possible node associated with the key, or {@code null} if there is no such node. The node
++ * returned may have a {@code null} {@link TableEntry#value}, in which case the node is a placeholder for
++ * a compute/computeIfAbsent call. The placeholder node should not be considered mapped in order to preserve
++ * happens-before relationships between writes and reads in the map.
++ */
++ protected final TableEntry<V> getNode(final long key) {
++ final int hash = getHash(key);
++
++ TableEntry<V>[] table = this.table;
++ for (;;) {
++ TableEntry<V> node = getAtIndexVolatile(table, hash & (table.length - 1));
++
++ if (node == null) {
++ // node == null
++ return node;
++ }
++
++ if (node.resize) {
++ table = (TableEntry<V>[])node.getValuePlain();
++ continue;
++ }
++
++ for (; node != null; node = node.getNextVolatile()) {
++ if (node.key == key) {
++ return node;
++ }
++ }
++
++ // node == null
++ return node;
++ }
++ }
++
++ /**
++ * Returns the currently mapped value associated with the specified key, or {@code null} if there is none.
++ *
++ * @param key Specified key
++ */
++ public V get(final long key) {
++ final TableEntry<V> node = this.getNode(key);
++ return node == null ? null : node.getValueVolatile();
++ }
++
++ /**
++ * Returns the currently mapped value associated with the specified key, or the specified default value if there is none.
++ *
++ * @param key Specified key
++ * @param defaultValue Specified default value
++ */
++ public V getOrDefault(final long key, final V defaultValue) {
++ final TableEntry<V> node = this.getNode(key);
++ if (node == null) {
++ return defaultValue;
++ }
++
++ final V ret = node.getValueVolatile();
++ if (ret == null) {
++ // ret == null for nodes pre-allocated to compute() and friends
++ return defaultValue;
++ }
++
++ return ret;
++ }
++
++ /**
++ * Returns whether the specified key is mapped to some value.
++ * @param key Specified key
++ */
++ public boolean containsKey(final long key) {
++ // cannot use getNode, as the node may be a placeholder for compute()
++ return this.get(key) != null;
++ }
++
++ /**
++ * Returns whether the specified value has a key mapped to it.
++ * @param value Specified value
++ * @throws NullPointerException If value is null
++ */
++ public boolean containsValue(final V value) {
++ Validate.notNull(value, "Value cannot be null");
++
++ final NodeIterator<V> iterator = new NodeIterator<>(this.table);
++
++ TableEntry<V> node;
++ while ((node = iterator.findNext()) != null) {
++ // need to use acquire here to ensure the happens-before relationship
++ if (node.getValueAcquire() == value) {
++ return true;
++ }
++ }
++
++ return false;
++ }
++
++ /**
++ * Returns the number of mappings in this map.
++ */
++ public int size() {
++ final long ret = this.size.sum();
++
++ if (ret <= 0L) {
++ return 0;
++ }
++ if (ret >= (long)Integer.MAX_VALUE) {
++ return Integer.MAX_VALUE;
++ }
++
++ return (int)ret;
++ }
++
++ /**
++ * Returns whether this map has no mappings.
++ */
++ public boolean isEmpty() {
++ return this.size.sum() <= 0L;
++ }
++
++ /**
++ * Adds count to size and checks threshold for resizing
++ */
++ protected final void addSize(final long count) {
++ this.size.add(count);
++
++ final int threshold = this.getThresholdAcquire();
++
++ if (threshold < 0L) {
++ // resizing or no resizing allowed, in either cases we do not need to do anything
++ return;
++ }
++
++ final long sum = this.size.sum();
++
++ if (sum < (long)threshold) {
++ return;
++ }
++
++ if (threshold != this.compareExchangeThresholdVolatile(threshold, THRESHOLD_RESIZING)) {
++ // some other thread resized
++ return;
++ }
++
++ // create new table
++ this.resize(sum);
++ }
++
++ /**
++ * Resizes table, only invoke for the thread which has successfully updated threshold to {@link #THRESHOLD_RESIZING}
++ * @param sum Estimate of current mapping count, must be >= old threshold
++ */
++ private void resize(final long sum) {
++ int capacity;
++
++ // add 1.0, as sum may equal threshold (in which case, sum / loadFactor = current capacity)
++ // adding 1.0 should at least raise the size by a factor of two due to usage of roundCeilLog2
++ final double targetD = ((double)sum / (double)this.loadFactor) + 1.0;
++ if (targetD >= (double)MAXIMUM_CAPACITY) {
++ capacity = MAXIMUM_CAPACITY;
++ } else {
++ capacity = (int)Math.ceil(targetD);
++ capacity = IntegerUtil.roundCeilLog2(capacity);
++ if (capacity > MAXIMUM_CAPACITY) {
++ capacity = MAXIMUM_CAPACITY;
++ }
++ }
++
++ // create new table data
++
++ final TableEntry<V>[] newTable = new TableEntry[capacity];
++ // noinspection unchecked
++ final TableEntry<V> resizeNode = new TableEntry<>(0L, (V)newTable, true);
++
++ // transfer nodes from old table
++
++ // does not need to be volatile read, just plain
++ final TableEntry<V>[] oldTable = this.table;
++
++ // when resizing, the old entries at bin i (where i = hash % oldTable.length) are assigned to
++ // bin k in the new table (where k = hash % newTable.length)
++ // since both table lengths are powers of two (specifically, newTable is a multiple of oldTable),
++ // the possible number of locations in the new table to assign any given i is newTable.length/oldTable.length
++
++ // we can build the new linked nodes for the new table by using a work array sized to newTable.length/oldTable.length
++ // which holds the _last_ entry in the chain per bin
++
++ final int capOldShift = IntegerUtil.floorLog2(oldTable.length);
++ final int capDiffShift = IntegerUtil.floorLog2(capacity) - capOldShift;
++
++ if (capDiffShift == 0) {
++ throw new IllegalStateException("Resizing to same size");
++ }
++
++ final TableEntry<V>[] work = new TableEntry[1 << capDiffShift]; // typically, capDiffShift = 1
++
++ for (int i = 0, len = oldTable.length; i < len; ++i) {
++ TableEntry<V> binNode = getAtIndexVolatile(oldTable, i);
++
++ for (;;) {
++ if (binNode == null) {
++ // just need to replace the bin node, do not need to move anything
++ if (null == (binNode = compareAndExchangeAtIndexVolatile(oldTable, i, null, resizeNode))) {
++ break;
++ } // else: binNode != null, fall through
++ }
++
++ // need write lock to block other writers
++ synchronized (binNode) {
++ if (binNode != (binNode = getAtIndexVolatile(oldTable, i))) {
++ continue;
++ }
++
++ // an important detail of resizing is that we do not need to be concerned with synchronisation on
++ // writes to the new table, as no access to any nodes on bin i on oldTable will occur until a thread
++ // sees the resizeNode
++ // specifically, as long as the resizeNode is release written there are no cases where another thread
++ // will see our writes to the new table
++
++ TableEntry<V> next = binNode.getNextPlain();
++
++ if (next == null) {
++ // simple case: do not use work array
++
++ // do not need to create new node, readers only need to see the state of the map at the
++ // beginning of a call, so any additions onto _next_ don't really matter
++ // additionally, the old node is replaced so that writers automatically forward to the new table,
++ // which resolves any issues
++ newTable[getHash(binNode.key) & (capacity - 1)] = binNode;
++ } else {
++ // reset for next usage
++ Arrays.fill(work, null);
++
++ for (TableEntry curr = binNode; curr != null; curr = curr.getNextPlain()) {
++ final int newTableIdx = getHash(curr.key) & (capacity - 1);
++ final int workIdx = newTableIdx >>> capOldShift;
++
++ final TableEntry replace = new TableEntry<>(curr.key, curr.getValuePlain());
++
++ final TableEntry workNode = work[workIdx];
++ work[workIdx] = replace;
++
++ if (workNode == null) {
++ newTable[newTableIdx] = replace;
++ continue;
++ } else {
++ workNode.setNextPlain(replace);
++ continue;
++ }
++ }
++ }
++
++ setAtIndexRelease(oldTable, i, resizeNode);
++ break;
++ }
++ }
++ }
++
++ // calculate new threshold
++ final int newThreshold;
++ if (capacity == MAXIMUM_CAPACITY) {
++ newThreshold = THRESHOLD_NO_RESIZE;
++ } else {
++ newThreshold = getTargetThreshold(capacity, loadFactor);
++ }
++
++ this.table = newTable;
++ // finish resize operation by releasing hold on threshold
++ this.setThresholdVolatile(newThreshold);
++ }
++
++    /**
++     * Atomically subtracts {@code count} from the size counter; counterpart to addSize().
++     */
++    protected final void subSize(final long count) {
++        this.size.add(-count);
++    }
++
++    /**
++     * Atomically updates the value associated with {@code key} to {@code value}, or inserts a new mapping with {@code key}
++     * mapped to {@code value}.
++     * @param key Specified key
++     * @param value Specified value
++     * @throws NullPointerException If value is null
++     * @return Old value previously associated with key, or {@code null} if none.
++     */
++    public V put(final long key, final V value) {
++        Validate.notNull(value, "Value may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, value)))) {
++                        // successfully inserted
++                        this.addSize(1L);
++                        return null;
++                    } // else: node != null, fall through
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V ret = node.getValuePlain();
++                            node.setValueVolatile(value);
++                            return ret;
++                        }
++                    }
++
++                    // volatile ordering ensured by addSize(), but we need release here
++                    // to ensure proper ordering with reads and other writes
++                    prev.setNextRelease(new TableEntry<>(key, value));
++                }
++
++                this.addSize(1L);
++                return null;
++            }
++        }
++    }
++
++    /**
++     * Atomically inserts a new mapping with {@code key} mapped to {@code value} if and only if {@code key} is not
++     * currently mapped to some value.
++     * @param key Specified key
++     * @param value Specified value
++     * @throws NullPointerException If value is null
++     * @return Value currently associated with key, or {@code null} if none and {@code value} was associated.
++     */
++    public V putIfAbsent(final long key, final V value) {
++        Validate.notNull(value, "Value may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, value)))) {
++                        // successfully inserted
++                        this.addSize(1L);
++                        return null;
++                    } // else: node != null, fall through
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                // optimise ifAbsent calls: check if first node is key before attempting lock acquire
++                if (node.key == key) {
++                    final V ret = node.getValueVolatile();
++                    if (ret != null) {
++                        return ret;
++                    } // else: fall back to lock to read the node
++                }
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            return node.getValuePlain();
++                        }
++                    }
++
++                    // volatile ordering ensured by addSize(), but we need release here
++                    // to ensure proper ordering with reads and other writes
++                    prev.setNextRelease(new TableEntry<>(key, value));
++                }
++
++                this.addSize(1L);
++                return null;
++            }
++        }
++    }
++
++    /**
++     * Atomically updates the value associated with {@code key} to {@code value}, or does nothing if {@code key} is not
++     * associated with a value.
++     * @param key Specified key
++     * @param value Specified value
++     * @throws NullPointerException If value is null
++     * @return Old value previously associated with key, or {@code null} if none.
++     */
++    public V replace(final long key, final V value) {
++        Validate.notNull(value, "Value may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    for (; node != null; node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V ret = node.getValuePlain();
++                            node.setValueVolatile(value);
++                            return ret;
++                        }
++                    }
++                }
++
++                return null;
++            }
++        }
++    }
++
++    /**
++     * Atomically updates the value associated with {@code key} to {@code update} if the currently associated
++     * value is reference equal to {@code expect}, otherwise does nothing.
++     * @param key Specified key
++     * @param expect Expected value to check current mapped value with
++     * @param update Update value to replace mapped value with
++     * @throws NullPointerException If expect or update is null
++     * @return If the currently mapped value is not reference equal to {@code expect}, then returns the currently mapped
++     *         value. If the key is not mapped to any value, then returns {@code null}. If neither of the two cases are
++     *         true, then returns {@code expect}.
++     */
++    public V replace(final long key, final V expect, final V update) {
++        Validate.notNull(expect, "Expect may not be null");
++        Validate.notNull(update, "Update may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    for (; node != null; node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V ret = node.getValuePlain();
++
++                            if (ret != expect) {
++                                return ret;
++                            }
++
++                            node.setValueVolatile(update);
++                            return ret;
++                        }
++                    }
++                }
++
++                return null;
++            }
++        }
++    }
++
++    /**
++     * Atomically removes the mapping for the specified key and returns the value it was associated with. If the key
++     * is not mapped to a value, then does nothing and returns {@code null}.
++     * @param key Specified key
++     * @return Old value previously associated with key, or {@code null} if none.
++     */
++    public V remove(final long key) {
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++                V ret = null;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++
++                    TableEntry<V> prev = null;
++
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            ret = node.getValuePlain();
++                            removed = true;
++
++                            // volatile ordering ensured by addSize(), but we need release here
++                            // to ensure proper ordering with reads and other writes
++                            if (prev == null) {
++                                setAtIndexRelease(table, index, node.getNextPlain());
++                            } else {
++                                prev.setNextRelease(node.getNextPlain());
++                            }
++
++                            break;
++                        }
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * Atomically removes the mapping for the specified key if it is mapped to {@code expect} and returns {@code expect}. If the key
++     * is not mapped to a value, then does nothing and returns {@code null}. If the key is mapped to a value that is not reference
++     * equal to {@code expect}, then returns that value.
++     * @param key Specified key
++     * @param expect Specified expected value
++     * @return The specified expected value if the key was mapped to {@code expect}. If
++     *         the key is not mapped to any value, then returns {@code null}. If neither of those cases are true,
++     *         then returns the current (non-null) mapped value for key.
++     */
++    public V remove(final long key, final V expect) {
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++                V ret = null;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++
++                    TableEntry<V> prev = null;
++
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            ret = node.getValuePlain();
++                            if (ret == expect) {
++                                removed = true;
++
++                                // volatile ordering ensured by addSize(), but we need release here
++                                // to ensure proper ordering with reads and other writes
++                                if (prev == null) {
++                                    setAtIndexRelease(table, index, node.getNextPlain());
++                                } else {
++                                    prev.setNextRelease(node.getNextPlain());
++                                }
++                            }
++                            break;
++                        }
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * Atomically removes the mapping for the specified key if the predicate returns true for its currently mapped value. If the key
++     * is not mapped to a value, then does nothing and returns {@code null}.
++     *
++     * <p>
++     * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
++     * </p>
++     *
++     * @param key Specified key
++     * @param predicate Specified predicate
++     * @throws NullPointerException If predicate is null
++     * @return The removed value if the predicate returned true for the mapped value. If
++     *         the key is not mapped to any value, then returns {@code null}. If neither of those cases are true,
++     *         then returns the current (non-null) mapped value for key.
++     */
++    public V removeIf(final long key, final Predicate<? super V> predicate) {
++        Validate.notNull(predicate, "Predicate may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++                V ret = null;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++
++                    TableEntry<V> prev = null;
++
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            ret = node.getValuePlain();
++                            if (predicate.test(ret)) {
++                                removed = true;
++
++                                // volatile ordering ensured by addSize(), but we need release here
++                                // to ensure proper ordering with reads and other writes
++                                if (prev == null) {
++                                    setAtIndexRelease(table, index, node.getNextPlain());
++                                } else {
++                                    prev.setNextRelease(node.getNextPlain());
++                                }
++                            }
++                            break;
++                        }
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * See {@link java.util.concurrent.ConcurrentMap#compute(Object, BiFunction)}
++     * <p>
++     * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
++     * </p>
++     */
++    public V compute(final long key, final BiLong1Function<? super V, ? extends V> function) {
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                V ret = null;
++                if (node == null) {
++                    final TableEntry<V> insert = new TableEntry<>(key, null);
++
++                    boolean added = false;
++
++                    synchronized (insert) {
++                        if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, insert))) {
++                            try {
++                                ret = function.apply(key, null);
++                            } catch (final Throwable throwable) {
++                                setAtIndexVolatile(table, index, null);
++                                ThrowUtil.throwUnchecked(throwable);
++                                // unreachable
++                                return null;
++                            }
++
++                            if (ret == null) {
++                                setAtIndexVolatile(table, index, null);
++                                return ret;
++                            } else {
++                                // volatile ordering ensured by addSize(), but we need release here
++                                // to ensure proper ordering with reads and other writes
++                                insert.setValueRelease(ret);
++                                added = true;
++                            }
++                        } // else: node != null, fall through
++                    }
++
++                    if (added) {
++                        this.addSize(1L);
++                        return ret;
++                    }
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++                boolean added = false;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V old = node.getValuePlain();
++
++                            final V computed = function.apply(key, old);
++
++                            if (computed != null) {
++                                node.setValueVolatile(computed);
++                                return computed;
++                            }
++
++                            // volatile ordering ensured by addSize(), but we need release here
++                            // to ensure proper ordering with reads and other writes
++                            if (prev == null) {
++                                setAtIndexRelease(table, index, node.getNextPlain());
++                            } else {
++                                prev.setNextRelease(node.getNextPlain());
++                            }
++
++                            removed = true;
++                            break;
++                        }
++                    }
++
++                    if (!removed) {
++                        final V computed = function.apply(key, null);
++                        if (computed != null) {
++                            // volatile ordering ensured by addSize(), but we need release here
++                            // to ensure proper ordering with reads and other writes
++                            prev.setNextRelease(new TableEntry<>(key, computed));
++                            ret = computed;
++                            added = true;
++                        }
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++                if (added) {
++                    this.addSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * See {@link java.util.concurrent.ConcurrentMap#computeIfAbsent(Object, Function)}
++     * <p>
++     * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
++     * </p>
++     */
++    public V computeIfAbsent(final long key, final LongFunction<? extends V> function) {
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                V ret = null;
++                if (node == null) {
++                    final TableEntry<V> insert = new TableEntry<>(key, null);
++
++                    boolean added = false;
++
++                    synchronized (insert) {
++                        if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, insert))) {
++                            try {
++                                ret = function.apply(key);
++                            } catch (final Throwable throwable) {
++                                setAtIndexVolatile(table, index, null);
++                                ThrowUtil.throwUnchecked(throwable);
++                                // unreachable
++                                return null;
++                            }
++
++                            if (ret == null) {
++                                setAtIndexVolatile(table, index, null);
++                                return null;
++                            } else {
++                                // volatile ordering ensured by addSize(), but we need release here
++                                // to ensure proper ordering with reads and other writes
++                                insert.setValueRelease(ret);
++                                added = true;
++                            }
++                        } // else: node != null, fall through
++                    }
++
++                    if (added) {
++                        this.addSize(1L);
++                        return ret;
++                    }
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                // optimise ifAbsent calls: check if first node is key before attempting lock acquire
++                if (node.key == key) {
++                    ret = node.getValueVolatile();
++                    if (ret != null) {
++                        return ret;
++                    } // else: fall back to lock to read the node
++                }
++
++                boolean added = false;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            ret = node.getValuePlain();
++                            return ret;
++                        }
++                    }
++
++                    final V computed = function.apply(key);
++                    if (computed != null) {
++                        // volatile ordering ensured by addSize(), but we need release here
++                        // to ensure proper ordering with reads and other writes
++                        prev.setNextRelease(new TableEntry<>(key, computed));
++                        ret = computed;
++                        added = true;
++                    }
++                }
++
++                if (added) {
++                    this.addSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * See {@link java.util.concurrent.ConcurrentMap#computeIfPresent(Object, BiFunction)}
++     * <p>
++     * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
++     * </p>
++     */
++    public V computeIfPresent(final long key, final BiLong1Function<? super V, ? extends V> function) {
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    return null;
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V old = node.getValuePlain();
++
++                            final V computed = function.apply(key, old);
++
++                            if (computed != null) {
++                                node.setValueVolatile(computed);
++                                return computed;
++                            }
++
++                            // volatile ordering ensured by addSize(), but we need release here
++                            // to ensure proper ordering with reads and other writes
++                            if (prev == null) {
++                                setAtIndexRelease(table, index, node.getNextPlain());
++                            } else {
++                                prev.setNextRelease(node.getNextPlain());
++                            }
++
++                            removed = true;
++                            break;
++                        }
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++
++                return null;
++            }
++        }
++    }
++
++    /**
++     * See {@link java.util.concurrent.ConcurrentMap#merge(Object, Object, BiFunction)}
++     * <p>
++     * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
++     * </p>
++     */
++    public V merge(final long key, final V def, final BiFunction<? super V, ? super V, ? extends V> function) {
++        Validate.notNull(def, "Default value may not be null");
++
++        final int hash = getHash(key);
++
++        TableEntry<V>[] table = this.table;
++        table_loop:
++        for (;;) {
++            final int index = hash & (table.length - 1);
++
++            TableEntry<V> node = getAtIndexVolatile(table, index);
++            node_loop:
++            for (;;) {
++                if (node == null) {
++                    if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, def)))) {
++                        // successfully inserted
++                        this.addSize(1L);
++                        return def;
++                    } // else: node != null, fall through
++                }
++
++                if (node.resize) {
++                    table = (TableEntry<V>[])node.getValuePlain();
++                    continue table_loop;
++                }
++
++                boolean removed = false;
++                boolean added = false;
++                V ret = null;
++
++                synchronized (node) {
++                    if (node != (node = getAtIndexVolatile(table, index))) {
++                        continue node_loop;
++                    }
++                    // plain reads are fine during synchronised access, as we are the only writer
++                    TableEntry<V> prev = null;
++                    for (; node != null; prev = node, node = node.getNextPlain()) {
++                        if (node.key == key) {
++                            final V old = node.getValuePlain();
++
++                            final V computed = function.apply(old, def);
++
++                            if (computed != null) {
++                                node.setValueVolatile(computed);
++                                return computed;
++                            }
++
++                            // volatile ordering ensured by addSize(), but we need release here
++                            // to ensure proper ordering with reads and other writes
++                            if (prev == null) {
++                                setAtIndexRelease(table, index, node.getNextPlain());
++                            } else {
++                                prev.setNextRelease(node.getNextPlain());
++                            }
++
++                            removed = true;
++                            break;
++                        }
++                    }
++
++                    if (!removed) {
++                        // volatile ordering ensured by addSize(), but we need release here
++                        // to ensure proper ordering with reads and other writes
++                        prev.setNextRelease(new TableEntry<>(key, def));
++                        ret = def;
++                        added = true;
++                    }
++                }
++
++                if (removed) {
++                    this.subSize(1L);
++                }
++                if (added) {
++                    this.addSize(1L);
++                }
++
++                return ret;
++            }
++        }
++    }
++
++    /**
++     * Removes at least all entries currently mapped at the beginning of this call. May not remove entries added during
++     * this call. As a result, only if this map is not modified during the call will all entries be removed by
++     * the end of the call.
++     * <p>
++     * This function is not atomic.
++     * </p>
++     *
++     */
++    public void clear() {
++        // it is possible to optimise this to directly interact with the table,
++        // but we do need to be careful when interacting with resized tables,
++        // and the NodeIterator already does this logic
++        final NodeIterator<V> nodeIterator = new NodeIterator<>(this.table);
++
++        TableEntry<V> node;
++        while ((node = nodeIterator.findNext()) != null) {
++            this.remove(node.key);
++        }
++    }
++
++    /**
++     * Returns an iterator over the entries in this map. The iterator is only guaranteed to see entries that were
++     * added before the beginning of this call, but it may see entries added during.
++     */
++    public Iterator<TableEntry<V>> entryIterator() {
++        return new EntryIterator<>(this);
++    }
++
++    /**
++     * Returns an iterator over the keys in this map. The iterator is only guaranteed to see keys that were
++     * added before the beginning of this call, but it may see keys added during. Supports primitive iteration.
++     */
++    public PrimitiveIterator.OfLong keyIterator() {
++        return new KeyIterator<>(this);
++    }
++
++    /**
++     * Returns an iterator over the values in this map. The iterator is only guaranteed to see values that were
++     * added before the beginning of this call, but it may see values added during.
++     */
++    public Iterator<V> valueIterator() {
++        return new ValueIterator<>(this);
++    }
++
++    protected static final class EntryIterator<V> extends BaseIteratorImpl<V, TableEntry<V>> {
++
++        protected EntryIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
++            super(map);
++        }
++
++        @Override
++        public TableEntry<V> next() throws NoSuchElementException {
++            return this.nextNode();
++        }
++
++        @Override
++        public void forEachRemaining(final Consumer<? super TableEntry<V>> action) {
++            Validate.notNull(action, "Action may not be null");
++            while (this.hasNext()) {
++                action.accept(this.next());
++            }
++        }
++    }
++
++    protected static final class KeyIterator<V> extends BaseIteratorImpl<V, Long> implements PrimitiveIterator.OfLong {
++
++        protected KeyIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
++            super(map);
++        }
++
++        @Override
++        public Long next() throws NoSuchElementException {
++            return Long.valueOf(this.nextNode().key);
++        }
++
++        @Override
++        public long nextLong() {
++            return this.nextNode().key;
++        }
++
++        @Override
++        public void forEachRemaining(final Consumer<? super Long> action) {
++            Validate.notNull(action, "Action may not be null");
++
++            if (action instanceof LongConsumer longConsumer) {
++                this.forEachRemaining(longConsumer);
++                return;
++            }
++
++            while (this.hasNext()) {
++                action.accept(this.next());
++            }
++        }
++
++        @Override
++        public void forEachRemaining(final LongConsumer action) {
++            Validate.notNull(action, "Action may not be null");
++            while (this.hasNext()) {
++                action.accept(this.nextLong());
++            }
++        }
++    }
++
++    protected static final class ValueIterator<V> extends BaseIteratorImpl<V, V> {
++
++        protected ValueIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
++            super(map);
++        }
++
++        @Override
++        public V next() throws NoSuchElementException {
++            return this.nextNode().getValueVolatile();
++        }
++
++        @Override
++        public void forEachRemaining(final Consumer<? super V> action) {
++            Validate.notNull(action, "Action may not be null");
++            while (this.hasNext()) {
++                action.accept(this.next());
++            }
++        }
++    }
++
++    protected static abstract class BaseIteratorImpl<V, T> extends NodeIterator<V> implements Iterator<T> {
++
++        protected final ConcurrentLong2ReferenceChainedHashTable<V> map;
++        protected TableEntry<V> lastReturned;
++        protected TableEntry<V> nextToReturn;
++
++        protected BaseIteratorImpl(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
++            super(map.table);
++            this.map = map;
++        }
++
++        @Override
++        public final boolean hasNext() {
++            if (this.nextToReturn != null) {
++                return true;
++            }
++
++            return (this.nextToReturn = this.findNext()) != null;
++        }
++
++        protected final TableEntry<V> nextNode() throws NoSuchElementException {
++            TableEntry<V> ret = this.nextToReturn;
++            if (ret != null) {
++                this.lastReturned = ret;
++                this.nextToReturn = null;
++                return ret;
++            }
++            ret = this.findNext();
++            if (ret != null) {
++                this.lastReturned = ret;
++                return ret;
++            }
++            throw new NoSuchElementException();
++        }
++
++        @Override
++        public final void remove() {
++            final TableEntry<V> lastReturned = this.lastReturned;
++            if (lastReturned == null) {
++                throw new NoSuchElementException();
++            }
++            this.lastReturned = null;
++            this.map.remove(lastReturned.key);
++        }
++
++        @Override
++        public abstract T next() throws NoSuchElementException;
++
++        // overwritten by subclasses to avoid indirection on hasNext() and next()
++        @Override
++        public abstract void forEachRemaining(final Consumer<? super T> action);
++    }
++
++    protected static class NodeIterator<V> {
++
++        protected TableEntry<V>[] currentTable;
++        protected ResizeChain<V> resizeChain;
++        protected TableEntry<V> last;
++        protected int nextBin;
++        protected int increment;
++
++        protected NodeIterator(final TableEntry<V>[] baseTable) {
++            this.currentTable = baseTable;
++            this.increment = 1;
++        }
++
++        private TableEntry<V>[] pullResizeChain(final int index) {
++            final ResizeChain<V> resizeChain = this.resizeChain;
++            if (resizeChain == null) {
++                this.currentTable = null;
++                return null;
++            }
++            final TableEntry<V>[] newTable = resizeChain.table;
++            if (newTable == null) {
++                this.currentTable = null;
++                return null;
++            }
++
++            // the increment is a multiple of table.length, so we can undo the increments on idx by taking the
++            // mod
++            int newIdx = index & (newTable.length - 1);
++
++            final ResizeChain<V> newChain = this.resizeChain = resizeChain.prev;
++            final TableEntry<V>[] prevTable = newChain.table;
++            final int increment;
++            if (prevTable == null) {
++                increment = 1;
++            } else {
++                increment = prevTable.length;
++            }
++
++            // done with the upper table, so we can skip the resize node
++            newIdx += increment;
++
++            this.increment = increment;
++            this.nextBin = newIdx;
++            this.currentTable = newTable;
++
++            return newTable;
++        }
++
++        private TableEntry<V>[] pushResizeChain(final TableEntry<V>[] table, final TableEntry<V> entry) {
++            final ResizeChain<V> chain = this.resizeChain;
++
++            if (chain == null) {
++                final TableEntry<V>[] nextTable = (TableEntry<V>[])entry.getValuePlain();
++
++                final ResizeChain<V> oldChain = new ResizeChain<>(table, null, null);
++                final ResizeChain<V> currChain = new ResizeChain<>(nextTable, oldChain, null);
++                oldChain.next = currChain;
++
++                this.increment = table.length;
++                this.resizeChain = currChain;
++                this.currentTable = nextTable;
++
++                return nextTable;
++            } else {
++                ResizeChain<V> currChain = chain.next;
++                if (currChain == null) {
++                    final TableEntry<V>[] ret = (TableEntry<V>[])entry.getValuePlain();
++                    currChain = new ResizeChain<>(ret, chain, null);
++                    chain.next = currChain;
++
++                    this.increment = table.length;
++                    this.resizeChain = currChain;
++                    this.currentTable = ret;
++
++                    return ret;
++                } else {
++                    this.increment = table.length;
++                    this.resizeChain = currChain;
++                    return this.currentTable = currChain.table;
++                }
++            }
++        }
++
++        protected final TableEntry<V> findNext() {
++            for (;;) {
++                final TableEntry<V> last = this.last;
++                if (last != null) {
++                    final TableEntry<V> next = last.getNextVolatile();
++                    if (next != null) {
++                        this.last = next;
++                        if (next.getValuePlain() == null) {
++                            // compute() node not yet available
++                            continue;
++                        }
++                        return next;
++                    }
++                }
++
++                TableEntry<V>[] table = this.currentTable;
++
++                if (table == null) {
++                    return null;
++                }
++
++                int idx = this.nextBin;
++                int increment = this.increment;
++                for (;;) {
++                    if (idx >= table.length) {
++                        table = this.pullResizeChain(idx);
++                        idx = this.nextBin;
++                        increment = this.increment;
++                        if (table != null) {
++                            continue;
++                        } else {
++                            this.last = null;
++                            return null;
++                        }
++                    }
++
++                    final TableEntry<V> entry = getAtIndexVolatile(table, idx);
++                    if (entry == null) {
++                        idx += increment;
++                        continue;
++                    }
++
++                    if (entry.resize) {
++                        // push onto resize chain
++                        table = this.pushResizeChain(table, entry);
++                        increment = this.increment;
++                        continue;
++                    }
++
++                    this.last = entry;
++                    this.nextBin = idx + increment;
++                    if (entry.getValuePlain() != null) {
++                        return entry;
++                    } else {
++                        // compute() node not yet available
++                        break;
++                    }
++                }
++            }
++        }
++
++        protected static final class ResizeChain<V> {
++
++            protected final TableEntry<V>[] table;
++            protected final ResizeChain<V> prev;
++            protected ResizeChain<V> next;
++
++            protected ResizeChain(final TableEntry<V>[] table, final ResizeChain<V> prev, final ResizeChain<V> next) {
++                this.table = table;
++                this.next = next;
++                this.prev = prev;
++            }
++        }
++    }
++
++    public static final class TableEntry<V> {
++
++        protected static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
++
++        protected final boolean resize;
++
++        protected final long key;
++
++        protected volatile V value;
++        protected static final VarHandle VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", Object.class);
++
++        protected final V getValuePlain() {
++            //noinspection unchecked
++            return (V)VALUE_HANDLE.get(this);
++        }
++
++        protected final V getValueAcquire() {
++            //noinspection unchecked
++            return (V)VALUE_HANDLE.getAcquire(this);
++        }
++
++        protected final V getValueVolatile() {
++            //noinspection unchecked
++            return (V)VALUE_HANDLE.getVolatile(this);
++        }
++
++        protected final void setValuePlain(final V value) {
++            VALUE_HANDLE.set(this, (Object)value);
++        }
++
++        protected final void setValueRelease(final V value) {
++            VALUE_HANDLE.setRelease(this, (Object)value);
++        }
++
++        protected final void setValueVolatile(final V value) {
++            VALUE_HANDLE.setVolatile(this, (Object)value);
++        }
++
++        protected volatile TableEntry<V> next;
++        protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
++
++        protected final TableEntry<V> getNextPlain() {
++            //noinspection unchecked
++            return (TableEntry<V>)NEXT_HANDLE.get(this);
++        }
++
++        protected final TableEntry<V> getNextVolatile() {
++            //noinspection unchecked
++            return (TableEntry<V>)NEXT_HANDLE.getVolatile(this);
++        }
++
++        protected final void setNextPlain(final TableEntry<V> next) {
++            NEXT_HANDLE.set(this, next);
++        }
++
++        protected final void setNextRelease(final TableEntry<V> next) {
++            NEXT_HANDLE.setRelease(this, next);
++        }
++
++        protected final void setNextVolatile(final TableEntry<V> next) {
++            NEXT_HANDLE.setVolatile(this, next);
++        }
++
++        public TableEntry(final long key, final V value) {
++            this.resize = false;
++            this.key = key;
++            this.setValuePlain(value);
++        }
++
++        public TableEntry(final long key, final V value, final boolean resize) {
++            this.resize = resize;
++            this.key = key;
++            this.setValuePlain(value);
++        }
++
++        public long getKey() {
++            return this.key;
++        }
++
++        public V getValue() {
++            return this.getValueVolatile();
++        }
++    }
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..83965350d292ccf42a34520d84dcda3f88146cff
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
+@@ -0,0 +1,1656 @@
+package ca.spottedleaf.concurrentutil.map;
+
-+import ca.spottedleaf.concurrentutil.util.ArrayUtil;
+import ca.spottedleaf.concurrentutil.util.CollectionUtil;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.HashUtil;
++import ca.spottedleaf.concurrentutil.util.IntegerUtil;
+import ca.spottedleaf.concurrentutil.util.Validate;
-+import io.papermc.paper.util.IntegerUtil;
+import java.lang.invoke.VarHandle;
+import java.util.ArrayList;
+import java.util.Arrays;
@@ -3724,7 +5926,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ /**
+ * Constructs this map with the specified capacity and load factor.
+ * @param capacity specified capacity, > 0
-+ * @param loadFactor specified load factor, > 0 and finite
++ * @param loadFactor specified load factor, > 0 && finite
+ */
+ public SWMRHashTable(final int capacity, final float loadFactor) {
+ final int tableSize = getCapacityFor(capacity);
@@ -3772,7 +5974,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ * with the specified load factor.
+ * All of the specified map's entries are copied into this map.
+ * @param capacity specified capacity, > 0
-+ * @param loadFactor specified load factor, > 0 and finite
++ * @param loadFactor specified load factor, > 0 && finite
+ * @param other The specified map.
+ */
+ public SWMRHashTable(final int capacity, final float loadFactor, final Map other) {
@@ -3780,6 +5982,15 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ this.putAll(other);
+ }
+
++ protected static TableEntry getAtIndexOpaque(final TableEntry[] table, final int index) {
++ // noinspection unchecked
++ return (TableEntry)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getOpaque(table, index);
++ }
++
++ protected static void setAtIndexRelease(final TableEntry[] table, final int index, final TableEntry value) {
++ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
++ }
++
+ public final float getLoadFactor() {
+ return this.loadFactor;
+ }
@@ -3799,7 +6010,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ final int hash = SWMRHashTable.getHash(key);
+ final TableEntry[] table = this.getTableAcquire();
+
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
+ if (hash == curr.hash && (key == curr.key || curr.key.equals(key))) {
+ return curr;
+ }
@@ -3826,15 +6037,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ /** must be deterministic given a key */
+ private static int getHash(final Object key) {
+ int hash = key == null ? 0 : key.hashCode();
-+ // inlined IntegerUtil#hash0
-+ hash *= 0x36935555;
-+ hash ^= hash >>> 16;
-+ return hash;
-+ }
-+
-+ static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
-+ static final int spread(int h) {
-+ return (h ^ (h >>> 16)) & HASH_BITS;
++ return HashUtil.mix(hash);
+ }
+
+ // rets -1 if capacity*loadFactor is too large
@@ -3856,10 +6059,9 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ return true;
+ }
+ /* Make no attempt to deal with concurrent modifications */
-+ if (!(obj instanceof Map)) {
++ if (!(obj instanceof Map, ?> other)) {
+ return false;
+ }
-+ final Map, ?> other = (Map, ?>)obj;
+
+ if (this.size() != other.size()) {
+ return false;
@@ -3868,7 +6070,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ final TableEntry[] table = this.getTableAcquire();
+
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ final Object otherValue = other.get(curr.key);
@@ -3891,7 +6093,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ final TableEntry[] table = this.getTableAcquire();
+
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ hash += curr.hashCode();
+ }
+ }
@@ -3905,7 +6107,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ @Override
+ public String toString() {
+ final StringBuilder builder = new StringBuilder(64);
-+ builder.append("SingleWriterMultiReaderHashMap:{");
++ builder.append("SWMRHashTable:{");
+
+ this.forEach((final K key, final V value) -> {
+ builder.append("{key: \"").append(key).append("\", value: \"").append(value).append("\"}");
@@ -3926,7 +6128,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ * {@inheritDoc}
+ */
+ @Override
-+ public Iterator> iterator() {
++ public Iterator> iterator() {
+ return new EntryIterator<>(this.getTableAcquire(), this);
+ }
+
@@ -3934,12 +6136,12 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ * {@inheritDoc}
+ */
+ @Override
-+ public void forEach(final Consumer super Entry> action) {
++ public void forEach(final Consumer super Map.Entry> action) {
+ Validate.notNull(action, "Null action");
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ action.accept(curr);
+ }
+ }
@@ -3954,7 +6156,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ action.accept(curr.key, value);
@@ -3971,7 +6173,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ action.accept(curr.key);
+ }
+ }
@@ -3986,7 +6188,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ action.accept(value);
@@ -4046,7 +6248,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V currVal = curr.getValueAcquire();
+ if (currVal == value || currVal.equals(value)) {
+ return true;
@@ -4086,9 +6288,9 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ return this.getSizeAcquire() == 0;
+ }
+
-+ protected Set keyset;
-+ protected Collection values;
-+ protected Set> entrySet;
++ protected KeySet keyset;
++ protected ValueCollection values;
++ protected EntrySet entrySet;
+
+ @Override
+ public Set keySet() {
@@ -4187,7 +6389,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ final TableEntry head = table[index];
+ if (head == null) {
+ final TableEntry insert = new TableEntry<>(hash, key, value);
-+ ArrayUtil.setRelease(table, index, insert);
++ setAtIndexRelease(table, index, insert);
+ this.addToSize(1);
+ return null;
+ }
@@ -4242,7 +6444,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ ++removed;
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
+
-+ ArrayUtil.setRelease(table, i, curr = curr.getNextPlain());
++ setAtIndexRelease(table, i, curr = curr.getNextPlain());
+
+ if (curr == null) {
+ continue bin_iteration_loop;
@@ -4276,7 +6478,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ * @param predicate The predicate to test key-value pairs against.
+ * @return The total number of key-value pairs removed from this map.
+ */
-+ public int removeEntryIf(final Predicate super Entry> predicate) {
++ public int removeEntryIf(final Predicate super Map.Entry> predicate) {
+ Validate.notNull(predicate, "Null predicate");
+
+ int removed = 0;
@@ -4295,7 +6497,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ ++removed;
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
+
-+ ArrayUtil.setRelease(table, i, curr = curr.getNextPlain());
++ setAtIndexRelease(table, i, curr = curr.getNextPlain());
+
+ if (curr == null) {
+ continue bin_iteration_loop;
@@ -4369,7 +6571,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ return false;
+ }
+
-+ ArrayUtil.setRelease(table, index, head.getNextPlain());
++ setAtIndexRelease(table, index, head.getNextPlain());
+ this.removeFromSize(1);
+
+ return true;
@@ -4403,7 +6605,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ }
+
+ if (hash == head.hash && (head.key == key || head.key.equals(key))) {
-+ ArrayUtil.setRelease(table, index, head.getNextPlain());
++ setAtIndexRelease(table, index, head.getNextPlain());
+ this.removeFromSize(1);
+
+ return head.getValuePlain();
@@ -4541,7 +6743,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry insert = new TableEntry<>(hash, key, newVal);
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, insert);
++ setAtIndexRelease(table, index, insert);
+ } else {
+ prev.setNextRelease(insert);
+ }
@@ -4560,7 +6762,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ }
+
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, curr.getNextPlain());
++ setAtIndexRelease(table, index, curr.getNextPlain());
+ } else {
+ prev.setNextRelease(curr.getNextPlain());
+ }
@@ -4596,7 +6798,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ }
+
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, curr.getNextPlain());
++ setAtIndexRelease(table, index, curr.getNextPlain());
+ } else {
+ prev.setNextRelease(curr.getNextPlain());
+ }
@@ -4637,7 +6839,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ final TableEntry insert = new TableEntry<>(hash, key, newVal);
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, insert);
++ setAtIndexRelease(table, index, insert);
+ } else {
+ prev.setNextRelease(insert);
+ }
@@ -4665,7 +6867,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ if (curr == null) {
+ final TableEntry insert = new TableEntry<>(hash, key, value);
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, insert);
++ setAtIndexRelease(table, index, insert);
+ } else {
+ prev.setNextRelease(insert);
+ }
@@ -4684,7 +6886,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ }
+
+ if (prev == null) {
-+ ArrayUtil.setRelease(table, index, curr.getNextPlain());
++ setAtIndexRelease(table, index, curr.getNextPlain());
+ } else {
+ prev.setNextRelease(curr.getNextPlain());
+ }
@@ -4698,6 +6900,8 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ protected static final class TableEntry implements Map.Entry {
+
++ protected static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
++
+ protected final int hash;
+ protected final K key;
+ protected V value;
@@ -4749,35 +6953,19 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ this.value = value;
+ }
+
-+ /**
-+ * {@inheritDoc}
-+ */
+ @Override
+ public K getKey() {
+ return this.key;
+ }
+
-+ /**
-+ * {@inheritDoc}
-+ */
+ @Override
+ public V getValue() {
+ return this.getValueAcquire();
+ }
+
-+ /**
-+ * {@inheritDoc}
-+ */
+ @Override
+ public V setValue(final V value) {
-+ if (value == null) {
-+ throw new NullPointerException();
-+ }
-+
-+ final V curr = this.getValuePlain();
-+
-+ this.setValueRelease(value);
-+ return curr;
++ throw new UnsupportedOperationException();
+ }
+
+ protected static int hash(final Object key, final Object value) {
@@ -4801,10 +6989,9 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ return true;
+ }
+
-+ if (!(obj instanceof Map.Entry)) {
++ if (!(obj instanceof Map.Entry, ?> other)) {
+ return false;
+ }
-+ final Map.Entry, ?> other = (Map.Entry, ?>)obj;
+ final Object otherKey = other.getKey();
+ final Object otherValue = other.getValue();
+
@@ -4832,7 +7019,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ this.map = map;
+ int tableIndex = 0;
+ for (int len = table.length; tableIndex < len; ++tableIndex) {
-+ final TableEntry entry = ArrayUtil.getOpaque(table, tableIndex);
++ final TableEntry entry = getAtIndexOpaque(table, tableIndex);
+ if (entry != null) {
+ this.nextEntry = entry;
+ this.tableIndex = tableIndex + 1;
@@ -4870,7 +7057,7 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ // nothing in chain, so find next available bin
+ for (;tableIndex < tableLength; ++tableIndex) {
-+ next = ArrayUtil.getOpaque(table, tableIndex);
++ next = getAtIndexOpaque(table, tableIndex);
+ if (next != null) {
+ this.nextEntry = next;
+ this.tableIndex = tableIndex + 1;
@@ -5082,10 +7269,9 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+
+ @Override
+ public boolean remove(final Object object) {
-+ if (!(object instanceof Map.Entry, ?>)) {
++ if (!(object instanceof Map.Entry, ?> entry)) {
+ return false;
+ }
-+ final Map.Entry, ?> entry = (Map.Entry, ?>)object;
+
+ final Object key;
+ final Object value;
@@ -5117,21 +7303,20 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+ }
+
+ @Override
-+ public Iterator> iterator() {
++ public Iterator> iterator() {
+ return new EntryIterator<>(this.map.getTableAcquire(), this.map);
+ }
+
+ @Override
-+ public void forEach(final Consumer super Entry> action) {
++ public void forEach(final Consumer super Map.Entry> action) {
+ this.map.forEach(action);
+ }
+
+ @Override
+ public boolean contains(final Object object) {
-+ if (!(object instanceof Map.Entry)) {
++ if (!(object instanceof Map.Entry, ?> entry)) {
+ return false;
+ }
-+ final Map.Entry, ?> entry = (Map.Entry, ?>)object;
+
+ final Object key;
+ final Object value;
@@ -5275,16 +7460,17 @@ index 0000000000000000000000000000000000000000..4289b984badd6f9167c86193454a630b
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
new file mode 100644
-index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e93320b0f7c34
+index 0000000000000000000000000000000000000000..bb301a9f4e3ac919552eef68afc73569d50674db
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
-@@ -0,0 +1,672 @@
+@@ -0,0 +1,674 @@
+package ca.spottedleaf.concurrentutil.map;
+
-+import ca.spottedleaf.concurrentutil.util.ArrayUtil;
++import ca.spottedleaf.concurrentutil.function.BiLongObjectConsumer;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.HashUtil;
++import ca.spottedleaf.concurrentutil.util.IntegerUtil;
+import ca.spottedleaf.concurrentutil.util.Validate;
-+import io.papermc.paper.util.IntegerUtil;
+import java.lang.invoke.VarHandle;
+import java.util.Arrays;
+import java.util.function.Consumer;
@@ -5370,7 +7556,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ /**
+ * Constructs this map with the specified capacity and load factor.
+ * @param capacity specified capacity, > 0
-+ * @param loadFactor specified load factor, > 0 and finite
++ * @param loadFactor specified load factor, > 0 && finite
+ */
+ public SWMRLong2ObjectHashTable(final int capacity, final float loadFactor) {
+ final int tableSize = getCapacityFor(capacity);
@@ -5418,7 +7604,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ * with the specified load factor.
+ * All of the specified map's entries are copied into this map.
+ * @param capacity specified capacity, > 0
-+ * @param loadFactor specified load factor, > 0 and finite
++ * @param loadFactor specified load factor, > 0 && finite
+ * @param other The specified map.
+ */
+ public SWMRLong2ObjectHashTable(final int capacity, final float loadFactor, final SWMRLong2ObjectHashTable other) {
@@ -5426,6 +7612,15 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ this.putAll(other);
+ }
+
++ protected static TableEntry getAtIndexOpaque(final TableEntry[] table, final int index) {
++ // noinspection unchecked
++ return (TableEntry)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getOpaque(table, index);
++ }
++
++ protected static void setAtIndexRelease(final TableEntry[] table, final int index, final TableEntry value) {
++ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
++ }
++
+ public final float getLoadFactor() {
+ return this.loadFactor;
+ }
@@ -5445,7 +7640,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ final int hash = SWMRLong2ObjectHashTable.getHash(key);
+ final TableEntry[] table = this.getTableAcquire();
+
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
+ if (key == curr.key) {
+ return curr;
+ }
@@ -5471,7 +7666,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+
+ /** must be deterministic given a key */
+ protected static int getHash(final long key) {
-+ return (int)it.unimi.dsi.fastutil.HashCommon.mix(key);
++ return (int)HashUtil.mix(key);
+ }
+
+ // rets -1 if capacity*loadFactor is too large
@@ -5493,10 +7688,9 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ return true;
+ }
+ /* Make no attempt to deal with concurrent modifications */
-+ if (!(obj instanceof SWMRLong2ObjectHashTable)) {
++ if (!(obj instanceof SWMRLong2ObjectHashTable> other)) {
+ return false;
+ }
-+ final SWMRLong2ObjectHashTable> other = (SWMRLong2ObjectHashTable>)obj;
+
+ if (this.size() != other.size()) {
+ return false;
@@ -5505,7 +7699,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ final TableEntry[] table = this.getTableAcquire();
+
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ final Object otherValue = other.get(curr.key);
@@ -5528,7 +7722,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ final TableEntry[] table = this.getTableAcquire();
+
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ hash += curr.hashCode();
+ }
+ }
@@ -5562,22 +7756,17 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ /**
+ * {@inheritDoc}
+ */
-+ public void forEach(final Consumer super SWMRLong2ObjectHashTable.TableEntry> action) {
++ public void forEach(final Consumer super TableEntry> action) {
+ Validate.notNull(action, "Null action");
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ action.accept(curr);
+ }
+ }
+ }
+
-+ @FunctionalInterface
-+ public static interface BiLongObjectConsumer {
-+ public void accept(final long key, final V value);
-+ }
-+
+ /**
+ * {@inheritDoc}
+ */
@@ -5586,7 +7775,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ action.accept(curr.key, value);
@@ -5603,7 +7792,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ action.accept(curr.key);
+ }
+ }
@@ -5618,7 +7807,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+
+ final TableEntry[] table = this.getTableAcquire();
+ for (int i = 0, len = table.length; i < len; ++i) {
-+ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ for (TableEntry curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
+ final V value = curr.getValueAcquire();
+
+ action.accept(value);
@@ -5739,7 +7928,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ final TableEntry head = table[index];
+ if (head == null) {
+ final TableEntry insert = new TableEntry<>(key, value);
-+ ArrayUtil.setRelease(table, index, insert);
++ setAtIndexRelease(table, index, insert);
+ this.addToSize(1);
+ return null;
+ }
@@ -5797,7 +7986,7 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ }
+
+ if (head.key == key) {
-+ ArrayUtil.setRelease(table, index, head.getNextPlain());
++ setAtIndexRelease(table, index, head.getNextPlain());
+ this.removeFromSize(1);
+
+ return head.getValuePlain();
@@ -5815,6 +8004,44 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ return null;
+ }
+
++ protected final V remove(final long key, final int hash, final V expect) {
++ final TableEntry[] table = this.getTablePlain();
++ final int index = (table.length - 1) & hash;
++
++ final TableEntry head = table[index];
++ if (head == null) {
++ return null;
++ }
++
++ if (head.key == key) {
++ final V val = head.value;
++ if (val == expect || val.equals(expect)) {
++ setAtIndexRelease(table, index, head.getNextPlain());
++ this.removeFromSize(1);
++
++ return head.getValuePlain();
++ } else {
++ return null;
++ }
++ }
++
++ for (TableEntry curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
++ if (key == curr.key) {
++ final V val = curr.value;
++ if (val == expect || val.equals(expect)) {
++ prev.setNextRelease(curr.getNextPlain());
++ this.removeFromSize(1);
++
++ return curr.getValuePlain();
++ } else {
++ return null;
++ }
++ }
++ }
++
++ return null;
++ }
++
+ /**
+ * {@inheritDoc}
+ */
@@ -5822,6 +8049,10 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key));
+ }
+
++ public boolean remove(final long key, final V expect) {
++ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key), expect) != null;
++ }
++
+ /**
+ * {@inheritDoc}
+ */
@@ -5847,6 +8078,8 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+
+ public static final class TableEntry {
+
++ protected static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
++
+ protected final long key;
+ protected V value;
+
@@ -5903,51 +8136,847 @@ index 0000000000000000000000000000000000000000..94fca3c9b31ca4e40688209e419e9332
+ public V getValue() {
+ return this.getValueAcquire();
+ }
++ }
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..8197ccb1c4e5878dbd8007b5fb514640765ec8e4
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
+@@ -0,0 +1,558 @@
++package ca.spottedleaf.concurrentutil.scheduler;
+
-+ /**
-+ * {@inheritDoc}
-+ */
-+ public V setValue(final V value) {
-+ if (value == null) {
-+ throw new NullPointerException();
++import ca.spottedleaf.concurrentutil.set.LinkedSortedSet;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.TimeUtil;
++import java.lang.invoke.VarHandle;
++import java.util.BitSet;
++import java.util.Comparator;
++import java.util.PriorityQueue;
++import java.util.concurrent.ThreadFactory;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.LockSupport;
++import java.util.function.BooleanSupplier;
++
++public class SchedulerThreadPool {
++
++ public static final long DEADLINE_NOT_SET = Long.MIN_VALUE;
++
++ private static final Comparator TICK_COMPARATOR_BY_TIME = (final SchedulableTick t1, final SchedulableTick t2) -> {
++ final int timeCompare = TimeUtil.compareTimes(t1.scheduledStart, t2.scheduledStart);
++ if (timeCompare != 0) {
++ return timeCompare;
++ }
++
++ return Long.compare(t1.id, t2.id);
++ };
++
++ private final TickThreadRunner[] runners;
++ private final Thread[] threads;
++ private final LinkedSortedSet