diff --git a/patches/server/0019-Rewrite-chunk-system.patch b/patches/server/0019-Rewrite-chunk-system.patch
new file mode 100644
index 0000000000..c3959dbe25
--- /dev/null
+++ b/patches/server/0019-Rewrite-chunk-system.patch
@@ -0,0 +1,18241 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf
+Date: Thu, 11 Mar 2021 02:32:30 -0800
+Subject: [PATCH] Rewrite chunk system
+
+== AT ==
+public net.minecraft.server.level.ChunkMap setViewDistance(I)V
+public net.minecraft.server.level.ChunkHolder pos
+public net.minecraft.server.level.ChunkMap overworldDataStorage
+public-f net.minecraft.world.level.chunk.storage.RegionFileStorage
+public net.minecraft.server.level.ChunkMap getPoiManager()Lnet/minecraft/world/entity/ai/village/poi/PoiManager;
+
+diff --git a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
+index 146c78a333e47cb4d8aa97700e19a12ca176ce76..691239e65b0870ceb0d071b57793cff9b2593f62 100644
+--- a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
++++ b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
+@@ -41,14 +41,14 @@ public final class StarLightInterface {
+ protected final ArrayDeque<SkyStarLightEngine> cachedSkyPropagators;
+ protected final ArrayDeque<BlockStarLightEngine> cachedBlockPropagators;
+
+- protected final LightQueue lightQueue = new LightQueue(this);
++ public final io.papermc.paper.chunk.system.light.LightQueue lightQueue; // Paper - replace light queue
+
+ protected final LayerLightEventListener skyReader;
+ protected final LayerLightEventListener blockReader;
+ protected final boolean isClientSide;
+
+- protected final int minSection;
+- protected final int maxSection;
++ public final int minSection; // Paper - public
++ public final int maxSection; // Paper - public
+ protected final int minLightSection;
+ protected final int maxLightSection;
+
+@@ -182,6 +182,7 @@ public final class StarLightInterface {
+ StarLightInterface.this.sectionChange(pos, notReady);
+ }
+ };
++ this.lightQueue = new io.papermc.paper.chunk.system.light.LightQueue(this); // Paper - replace light queue
+ }
+
+ public boolean hasSkyLight() {
+@@ -333,7 +334,7 @@ public final class StarLightInterface {
+ return this.lightAccess;
+ }
+
+- protected final SkyStarLightEngine getSkyLightEngine() {
++ public final SkyStarLightEngine getSkyLightEngine() { // Paper - public
+ if (this.cachedSkyPropagators == null) {
+ return null;
+ }
+@@ -348,7 +349,7 @@ public final class StarLightInterface {
+ return ret;
+ }
+
+- protected final void releaseSkyLightEngine(final SkyStarLightEngine engine) {
++ public final void releaseSkyLightEngine(final SkyStarLightEngine engine) { // Paper - public
+ if (this.cachedSkyPropagators == null) {
+ return;
+ }
+@@ -357,7 +358,7 @@ public final class StarLightInterface {
+ }
+ }
+
+- protected final BlockStarLightEngine getBlockLightEngine() {
++ public final BlockStarLightEngine getBlockLightEngine() { // Paper - public
+ if (this.cachedBlockPropagators == null) {
+ return null;
+ }
+@@ -372,7 +373,7 @@ public final class StarLightInterface {
+ return ret;
+ }
+
+- protected final void releaseBlockLightEngine(final BlockStarLightEngine engine) {
++ public final void releaseBlockLightEngine(final BlockStarLightEngine engine) { // Paper - public
+ if (this.cachedBlockPropagators == null) {
+ return;
+ }
+@@ -519,57 +520,15 @@ public final class StarLightInterface {
+ }
+
+ public void scheduleChunkLight(final ChunkPos pos, final Runnable run) {
+- this.lightQueue.queueChunkLighting(pos, run);
++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
+ }
+
+ public void removeChunkTasks(final ChunkPos pos) {
+- this.lightQueue.removeChunk(pos);
++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
+ }
+
+ public void propagateChanges() {
+- if (this.lightQueue.isEmpty()) {
+- return;
+- }
+-
+- final SkyStarLightEngine skyEngine = this.getSkyLightEngine();
+- final BlockStarLightEngine blockEngine = this.getBlockLightEngine();
+-
+- try {
+- LightQueue.ChunkTasks task;
+- while ((task = this.lightQueue.removeFirstTask()) != null) {
+- if (task.lightTasks != null) {
+- for (final Runnable run : task.lightTasks) {
+- run.run();
+- }
+- }
+-
+- final long coordinate = task.chunkCoordinate;
+- final int chunkX = CoordinateUtils.getChunkX(coordinate);
+- final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
+-
+- final Set<BlockPos> positions = task.changedPositions;
+- final Boolean[] sectionChanges = task.changedSectionSet;
+-
+- if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+- skyEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
+- }
+- if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+- blockEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
+- }
+-
+- if (skyEngine != null && task.queuedEdgeChecksSky != null) {
+- skyEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksSky);
+- }
+- if (blockEngine != null && task.queuedEdgeChecksBlock != null) {
+- blockEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksBlock);
+- }
+-
+- task.onComplete.complete(null);
+- }
+- } finally {
+- this.releaseSkyLightEngine(skyEngine);
+- this.releaseBlockLightEngine(blockEngine);
+- }
++ throw new UnsupportedOperationException("No longer implemented, task draining is now performed by the light thread"); // Paper - replace light queue
+ }
+
+ protected static final class LightQueue {
+diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java
+index 38f01952153348d937e326da0ec102cd9b0f80af..43380d5e3a40b64bebdf3c0e7c48eca8998c8ac0 100644
+--- a/src/main/java/co/aikar/timings/TimingsExport.java
++++ b/src/main/java/co/aikar/timings/TimingsExport.java
+@@ -163,7 +163,11 @@ public class TimingsExport extends Thread {
+ pair("gamerules", toObjectMapper(world.getWorld().getGameRules(), rule -> {
+ return pair(rule, world.getWorld().getGameRuleValue(rule));
+ })),
+- pair("ticking-distance", world.getChunkSource().chunkMap.getEffectiveViewDistance())
++ // Paper start - replace chunk loader system
++ pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()),
++ pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()),
++ pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())
++ // Paper end - replace chunk loader system
+ ));
+ }));
+
+diff --git a/src/main/java/co/aikar/timings/WorldTimingsHandler.java b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
+index 2f0d9b953802dee821cfde82d22b0567cce8ee91..22687667ec69a954261e55e59261286ac1b8b8cd 100644
+--- a/src/main/java/co/aikar/timings/WorldTimingsHandler.java
++++ b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
+@@ -59,6 +59,16 @@ public class WorldTimingsHandler {
+
+ public final Timing miscMobSpawning;
+
++ public final Timing poiUnload;
++ public final Timing chunkUnload;
++ public final Timing poiSaveDataSerialization;
++ public final Timing chunkSave;
++ public final Timing chunkSaveDataSerialization;
++ public final Timing chunkSaveIOWait;
++ public final Timing chunkUnloadPrepareSave;
++ public final Timing chunkUnloadPOISerialization;
++ public final Timing chunkUnloadDataSave;
++
+ public WorldTimingsHandler(Level server) {
+ String name = ((PrimaryLevelData) server.getLevelData()).getLevelName() + " - ";
+
+@@ -112,6 +122,16 @@ public class WorldTimingsHandler {
+
+
+ miscMobSpawning = Timings.ofSafe(name + "Mob spawning - Misc");
++
++ poiUnload = Timings.ofSafe(name + "Chunk unload - POI");
++ chunkUnload = Timings.ofSafe(name + "Chunk unload - Chunk");
++ poiSaveDataSerialization = Timings.ofSafe(name + "Chunk save - POI Data serialization");
++ chunkSave = Timings.ofSafe(name + "Chunk save - Chunk");
++ chunkSaveDataSerialization = Timings.ofSafe(name + "Chunk save - Chunk Data serialization");
++ chunkSaveIOWait = Timings.ofSafe(name + "Chunk save - Chunk IO Wait");
++ chunkUnloadPrepareSave = Timings.ofSafe(name + "Chunk unload - Async Save Prepare");
++ chunkUnloadPOISerialization = Timings.ofSafe(name + "Chunk unload - POI Data Serialization");
++ chunkUnloadDataSave = Timings.ofSafe(name + "Chunk unload - Data Serialization");
+ }
+
+ public static Timing getTickList(ServerLevel worldserver, String timingsType) {
+diff --git a/src/main/java/com/destroystokyo/paper/io/IOUtil.java b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..e064f96c90afd1a4890060baa055cfd0469b6a6f
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
+@@ -0,0 +1,63 @@
++package com.destroystokyo.paper.io;
++
++import org.bukkit.Bukkit;
++
++@Deprecated(forRemoval = true)
++public final class IOUtil {
++
++ /* Copied from concrete or concurrentutil */
++
++ public static long getCoordinateKey(final int x, final int z) {
++ return ((long)z << 32) | (x & 0xFFFFFFFFL);
++ }
++
++ public static int getCoordinateX(final long key) {
++ return (int)key;
++ }
++
++ public static int getCoordinateZ(final long key) {
++ return (int)(key >>> 32);
++ }
++
++ public static int getRegionCoordinate(final int chunkCoordinate) {
++ return chunkCoordinate >> 5;
++ }
++
++ public static int getChunkInRegion(final int chunkCoordinate) {
++ return chunkCoordinate & 31;
++ }
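++ // The helpers above mirror the region file layout: a region holds a 32x32 square of chunks,
++ // so >> 5 yields the region coordinate and & 31 the chunk's offset inside it. Likewise,
++ // getCoordinateKey packs x into the low 32 bits and z into the high 32 bits, so
++ // getCoordinateX/getCoordinateZ recover the original values exactly.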
++
++ public static String genericToString(final Object object) {
++ return object == null ? "null" : object.getClass().getName() + ":" + object.toString();
++ }
++
++ public static <T> T notNull(final T obj) {
++ if (obj == null) {
++ throw new NullPointerException();
++ }
++ return obj;
++ }
++
++ public static <T> T notNull(final T obj, final String msgIfNull) {
++ if (obj == null) {
++ throw new NullPointerException(msgIfNull);
++ }
++ return obj;
++ }
++
++ public static void arrayBounds(final int off, final int len, final int arrayLength, final String msgPrefix) {
++ if (off < 0 || len < 0 || (arrayLength - off) < len) {
++ throw new ArrayIndexOutOfBoundsException(msgPrefix + ": off: " + off + ", len: " + len + ", array length: " + arrayLength);
++ }
++ }
++
++ public static int getPriorityForCurrentThread() {
++ return Bukkit.isPrimaryThread() ? PrioritizedTaskQueue.HIGHEST_PRIORITY : PrioritizedTaskQueue.NORMAL_PRIORITY;
++ }
++
++ @SuppressWarnings("unchecked")
++ public static <T extends Throwable> void rethrow(final Throwable throwable) throws T {
++ throw (T)throwable;
++ }
++
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..f2c27e0ac65be4b75c1d86ef6fd45fdb538d96ac
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
+@@ -0,0 +1,474 @@
++package com.destroystokyo.paper.io;
++
++import com.mojang.logging.LogUtils;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.storage.RegionFile;
++import org.slf4j.Logger;
++
++import java.io.IOException;
++import java.util.concurrent.CompletableFuture;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.function.Consumer;
++import java.util.function.Function;
++
++/**
++ * Prioritized singleton thread responsible for all chunk IO that occurs in a minecraft server.
++ *
++ * <p>
++ * Singleton access: {@link Holder#INSTANCE}
++ * </p>
++ *
++ * <p>
++ * All functions provided are MT-Safe, however certain ordering constraints are assumed (but not enforced):
++ * </p>
++ * <ul>
++ * <li>Chunk saves may not occur for unloaded chunks.</li>
++ * <li>Tasks must be scheduled on the main thread.</li>
++ * </ul>
++ *
++ * @see Holder#INSTANCE
++ * @see #scheduleSave(ServerLevel, int, int, CompoundTag, CompoundTag, int)
++ * @see #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)
++ * @deprecated
++ */
++@Deprecated(forRemoval = true)
++public final class PaperFileIOThread extends QueueExecutorThread {
++
++ public static final Logger LOGGER = LogUtils.getLogger();
++ public static final CompoundTag FAILURE_VALUE = new CompoundTag();
++
++ public static final class Holder {
++
++ public static final PaperFileIOThread INSTANCE = new PaperFileIOThread();
++
++ static {
++ // Paper - fail hard on usage
++ }
++ }
++
++ private final AtomicLong writeCounter = new AtomicLong();
++
++ private PaperFileIOThread() {
++ super(new PrioritizedTaskQueue<>(), (int)(1.0e6)); // 1.0ms spinwait time
++ this.setName("Paper RegionFile IO Thread");
++ this.setPriority(Thread.NORM_PRIORITY - 1); // we keep priority close to normal because threads can wait on us
++ this.setUncaughtExceptionHandler((final Thread unused, final Throwable thr) -> {
++ LOGGER.error("Uncaught exception thrown from IO thread, report this!", thr);
++ });
++ }
++
++ /* run() is implemented by superclass */
++
++ /*
++ *
++ * IO thread will perform reads before writes
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read in progress while scheduling write, ignore read and schedule write
++ * If read in progress while scheduling read (no write in progress), chain the read task
++ *
++ *
++ * If write in progress while scheduling read, use the pending write data and ret immediately
++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
++ * it fails to properly propagate write failures. When writes fail the data is kept so future reads will actually
++ * read the failed write data. This should hopefully act as a way to prevent data loss for spurious fails for writing data.
++ *
++ */
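++ // In practice this means a read scheduled while a write for the same chunk is still pending is
++ // answered from the pending write data rather than from disk, and a newer write simply replaces
++ // the older pending data.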
++
++ /**
++ * Attempts to bump the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level to try to bump to
++ */
++ public void bumpPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ public CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final boolean poiData) {
++ // Paper start - rewrite chunk system
++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.getPendingWrite(
++ world, chunkX, chunkZ, poiData ? io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA :
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA
++ );
++ // Paper end - rewrite chunk system
++ }
++
++ /**
++ * Sets the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level to set to
++ */
++ public void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * <p>
++ * Impl notes:
++ * </p>
++ * <ul>
++ * <li>
++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.
++ * </li>
++ * <li>
++ * Writes may be called concurrently, although only the "later" write will go through.
++ * </li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param poiData Chunk point of interest data. If {@code null}, then no poi data is saved.
++ * @param chunkData Chunk data. If {@code null}, then no chunk data is saved.
++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
++ * @throws IllegalArgumentException If both {@code poiData} and {@code chunkData} are {@code null}.
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ,
++ final CompoundTag poiData, final CompoundTag chunkData,
++ final int priority) throws IllegalArgumentException {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ private void scheduleWrite(final ChunkDataController dataController, final ServerLevel world,
++ final int chunkX, final int chunkZ, final CompoundTag data, final int priority, final long writeCounter) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
++ * a {@link CompletableFuture} which is potentially completed ASYNCHRONOUSLY ON THE FILE IO THREAD when the load task
++ * has completed.
++ *
++ * Note that if the chunk fails to load the returned future is completed with {@code null}.
++ *
++ */
++ public CompletableFuture<ChunkData> loadChunkDataAsyncFuture(final ServerLevel world, final int chunkX, final int chunkZ,
++ final int priority, final boolean readPoiData, final boolean readChunkData,
++ final boolean intendingToBlock) {
++ final CompletableFuture<ChunkData> future = new CompletableFuture<>();
++ this.loadChunkDataAsync(world, chunkX, chunkZ, priority, future::complete, readPoiData, readChunkData, intendingToBlock);
++ return future;
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously.
++ *
++ * <p>
++ * Impl notes:
++ * </p>
++ * <ul>
++ * <li>
++ * If a chunk fails to load, the {@code onComplete} parameter is completed with {@code null}.
++ * </li>
++ * <li>
++ * It is possible for the {@code onComplete} parameter to be given {@link ChunkData} containing data
++ * this call did not request.
++ * </li>
++ * <li>
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ * </li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
++ * @param onComplete Consumer to execute once this task has completed
++ * @param readPoiData Whether to read point of interest data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
++ * @param readChunkData Whether to read chunk data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
++ * @return The {@link PrioritizedTaskQueue.PrioritizedTask} associated with this task. Note that this task does not support
++ * cancellation.
++ */
++ public void loadChunkDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++ final int priority, final Consumer<ChunkData> onComplete,
++ final boolean readPoiData, final boolean readChunkData,
++ final boolean intendingToBlock) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority: " + priority);
++ }
++
++ if (!(readPoiData | readChunkData)) {
++ throw new IllegalArgumentException("Must read chunk data or poi data");
++ }
++
++ final ChunkData complete = new ChunkData();
++ // Paper start - rewrite chunk system
++ final java.util.List<io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType> types = new java.util.ArrayList<>();
++ if (readPoiData) {
++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
++ }
++ if (readChunkData) {
++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ final ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority newPriority;
++ switch (priority) {
++ case PrioritizedTaskQueue.HIGHEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING;
++ case PrioritizedTaskQueue.HIGHER_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHEST;
++ case PrioritizedTaskQueue.HIGH_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGH;
++ case PrioritizedTaskQueue.NORMAL_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL;
++ case PrioritizedTaskQueue.LOW_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.LOW;
++ case PrioritizedTaskQueue.LOWEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.IDLE;
++ default -> throw new IllegalStateException("Legacy priority " + priority + " should be valid");
++ }
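++ // The switch above simply bridges the legacy integer priorities onto the rewritten scheduler's
++ // Priority enum (HIGHEST -> BLOCKING, HIGHER -> HIGHEST, HIGH -> HIGH, NORMAL -> NORMAL,
++ // LOW -> LOW, LOWEST -> IDLE).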
++ final Consumer<io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData> transformComplete = (io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData data) -> {
++ if (readPoiData) {
++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) != null) {
++ complete.poiData = FAILURE_VALUE;
++ } else {
++ complete.poiData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
++ }
++ }
++
++ if (readChunkData) {
++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) != null) {
++ complete.chunkData = FAILURE_VALUE;
++ } else {
++ complete.chunkData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ }
++
++ onComplete.accept(complete);
++ };
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, transformComplete, intendingToBlock, newPriority, types.toArray(new io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType[0]));
++ // Paper end - rewrite chunk system
++
++ }
++
++ // Note: the onComplete may be called asynchronously or synchronously here.
++ private void scheduleRead(final ChunkDataController dataController, final ServerLevel world,
++ final int chunkX, final int chunkZ, final Consumer<CompoundTag> onComplete, final int priority,
++ final boolean intendingToBlock) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
++ * the {@link ChunkData} associated with the specified chunk when the task is complete.
++ * @return The chunk data, or {@code null} if the chunk failed to load.
++ */
++ public ChunkData loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, final int priority,
++ final boolean readPoiData, final boolean readChunkData) {
++ return this.loadChunkDataAsyncFuture(world, chunkX, chunkZ, priority, readPoiData, readChunkData, true).join();
++ }
++
++ /**
++ * Schedules the given task at the specified priority to be executed on the IO thread.
++ *
++ * Internal api. Do not use.
++ *
++ */
++ public void runTask(final int priority, final Runnable runnable) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ static final class GeneralTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
++
++ private final Runnable run;
++
++ public GeneralTask(final int priority, final Runnable run) {
++ super(priority);
++ this.run = IOUtil.notNull(run, "Task may not be null");
++ }
++
++ @Override
++ public void run() {
++ try {
++ this.run.run();
++ } catch (final Throwable throwable) {
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ LOGGER.error("Failed to execute general task on IO thread " + IOUtil.genericToString(this.run), throwable);
++ }
++ }
++ }
++
++ public static final class ChunkData {
++
++ public CompoundTag poiData;
++ public CompoundTag chunkData;
++
++ public ChunkData() {}
++
++ public ChunkData(final CompoundTag poiData, final CompoundTag chunkData) {
++ this.poiData = poiData;
++ this.chunkData = chunkData;
++ }
++ }
++
++ public static abstract class ChunkDataController {
++
++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding.
++ public final ConcurrentHashMap<Long, ChunkDataTask> tasks = new ConcurrentHashMap<>(64, 0.5f);
++
++ public abstract void writeData(final int x, final int z, final CompoundTag compound) throws IOException;
++ public abstract CompoundTag readData(final int x, final int z) throws IOException;
++
++ public abstract <T> T computeForRegionFile(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
++ public abstract <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
++
++ public static final class InProgressWrite {
++ public long writeCounter;
++ public CompoundTag data;
++ }
++
++ public static final class InProgressRead {
++ public final CompletableFuture<CompoundTag> readFuture = new CompletableFuture<>();
++ }
++ }
++
++ public static final class ChunkDataTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
++
++ public ChunkDataController.InProgressWrite inProgressWrite;
++ public ChunkDataController.InProgressRead inProgressRead;
++
++ private final ServerLevel world;
++ private final int x;
++ private final int z;
++ private final ChunkDataController taskController;
++
++ public ChunkDataTask(final int priority, final ServerLevel world, final int x, final int z, final ChunkDataController taskController) {
++ super(priority);
++ this.world = world;
++ this.x = x;
++ this.z = z;
++ this.taskController = taskController;
++ }
++
++ @Override
++ public String toString() {
++ return "Task for world: '" + this.world.getWorld().getName() + "' at " + this.x + "," + this.z +
++ " poi: " + (this.taskController == null) + ", hash: " + this.hashCode(); // Paper - TODO rewrite chunk system
++ }
++
++ /*
++ *
++ * IO thread will perform reads before writes
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read in progress while scheduling write, ignore read and schedule write
++ * If read in progress while scheduling read (no write in progress), chain the read task
++ *
++ *
++ * If write in progress while scheduling read, use the pending write data and ret immediately
++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
++ * it fails to properly propagate write failures
++ *
++ */
++
++ void reschedule(final int priority) {
++ // priority is checked before this stage // TODO what
++ this.queue.lazySet(null);
++ this.priority.lazySet(priority);
++ PaperFileIOThread.Holder.INSTANCE.queueTask(this);
++ }
++
++ @Override
++ public void run() {
++ if (true) throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ ChunkDataController.InProgressRead read = this.inProgressRead;
++ if (read != null) {
++ CompoundTag compound = PaperFileIOThread.FAILURE_VALUE;
++ try {
++ compound = this.taskController.readData(this.x, this.z);
++ } catch (final Throwable thr) {
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
++ // fall through to complete with null data
++ }
++ read.readFuture.complete(compound);
++ }
++
++ final Long chunkKey = Long.valueOf(IOUtil.getCoordinateKey(this.x, this.z));
++
++ ChunkDataController.InProgressWrite write = this.inProgressWrite;
++
++ if (write == null) {
++ // IntelliJ warns this is invalid, however it does not consider that writes to the task map & the inProgress field can occur concurrently.
++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ return valueInMap.inProgressWrite == null ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ return; // set the task value to null, indicating we're done
++ }
++
++ // not null, which means there was a concurrent write
++ write = this.inProgressWrite;
++ }
++
++ for (;;) {
++ final long writeCounter;
++ final CompoundTag data;
++
++ //noinspection SynchronizationOnLocalVariableOrMethodParameter
++ synchronized (write) {
++ writeCounter = write.writeCounter;
++ data = write.data;
++ }
++
++ boolean failedWrite = false;
++
++ try {
++ this.taskController.writeData(this.x, this.z, data);
++ } catch (final Throwable thr) {
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
++ failedWrite = true;
++ }
++
++ boolean finalFailWrite = failedWrite;
++
++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ if (valueInMap.inProgressWrite.writeCounter == writeCounter) {
++ if (finalFailWrite) {
++ valueInMap.inProgressWrite.writeCounter = -1L;
++ }
++
++ return null;
++ }
++ return valueInMap;
++ // Hack end
++ });
++
++ if (inMap == null) {
++ // write counter matched, so we wrote the most up-to-date pending data, we're done here
++ // or we failed to write and successfully set the write counter to -1
++ return; // we're done here
++ }
++
++ // fetch & write new data
++ continue;
++ }
++ }
++ }
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..7844a3515430472bd829ff246396bceb0797de1b
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
+@@ -0,0 +1,299 @@
++package com.destroystokyo.paper.io;
++
++import java.util.concurrent.ConcurrentLinkedQueue;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.atomic.AtomicReference;
++
++@Deprecated(forRemoval = true)
++public class PrioritizedTaskQueue<T extends PrioritizedTaskQueue.PrioritizedTask & Runnable> {
++
++ // lower numbers are a higher priority (except < 0)
++ // higher priorities are always executed before lower priorities
++
++ /**
++ * Priority value indicating the task has completed or is being completed.
++ */
++ public static final int COMPLETING_PRIORITY = -1;
++
++ /**
++ * Highest priority, should only be used for main thread tasks or tasks that are blocking the main thread.
++ */
++ public static final int HIGHEST_PRIORITY = 0;
++
++ /**
++ * Should be only used in an IO task so that chunk loads do not wait on other IO tasks.
++ * This only exists because IO tasks are scheduled before chunk load tasks to decrease IO waiting times.
++ */
++ public static final int HIGHER_PRIORITY = 1;
++
++ /**
++ * Should be used for scheduling chunk loads/generation that would increase response times to users.
++ */
++ public static final int HIGH_PRIORITY = 2;
++
++ /**
++ * Default priority.
++ */
++ public static final int NORMAL_PRIORITY = 3;
++
++ /**
++ * Use for tasks not at all critical and can potentially be delayed.
++ */
++ public static final int LOW_PRIORITY = 4;
++
++ /**
++ * Use for tasks that should "eventually" execute.
++ */
++ public static final int LOWEST_PRIORITY = 5;
++
++ private static final int TOTAL_PRIORITIES = 6;
++
++ final ConcurrentLinkedQueue<T>[] queues = (ConcurrentLinkedQueue<T>[])new ConcurrentLinkedQueue[TOTAL_PRIORITIES];
++
++ private final AtomicBoolean shutdown = new AtomicBoolean();
++
++ {
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ this.queues[i] = new ConcurrentLinkedQueue<>();
++ }
++ }
++
++ /**
++ * Returns whether the specified priority is valid
++ */
++ public static boolean validPriority(final int priority) {
++ return priority >= 0 && priority < TOTAL_PRIORITIES;
++ }
++
++ /**
++ * Queues a task.
++ * @throws IllegalStateException If the task has already been queued. Use {@link PrioritizedTask#raisePriority(int)} to
++ * raise a task's priority.
++ * This can also be thrown if the queue has shutdown.
++ */
++ public void add(final T task) throws IllegalStateException {
++ int priority = task.getPriority();
++ if (priority != COMPLETING_PRIORITY) {
++ task.setQueue(this);
++ this.queues[priority].add(task);
++ }
++ if (this.shutdown.get()) {
++ // note: we're not actually sure at this point if our task will go through
++ throw new IllegalStateException("Queue has shutdown, refusing to execute task " + IOUtil.genericToString(task));
++ }
++ }
++
++ /**
++ * Polls the highest priority task currently available. {@code null} if none.
++ */
++ public T poll() {
++ T task;
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ final ConcurrentLinkedQueue<T> queue = this.queues[i];
++
++ while ((task = queue.poll()) != null) {
++ final int prevPriority = task.tryComplete(i);
++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
++ // if the prev priority was greater-than or equal to our current priority
++ return task;
++ }
++ }
++ }
++
++ return null;
++ }
++
++ /**
++ * Polls the highest priority task currently available. {@code null} if none.
++ */
++ public T poll(final int lowestPriority) {
++ T task;
++ final int max = Math.min(LOWEST_PRIORITY, lowestPriority);
++ for (int i = 0; i <= max; ++i) {
++ final ConcurrentLinkedQueue<T> queue = this.queues[i];
++
++ while ((task = queue.poll()) != null) {
++ final int prevPriority = task.tryComplete(i);
++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
++ // if the prev priority was greater-than or equal to our current priority
++ return task;
++ }
++ }
++ }
++
++ return null;
++ }
++
++ /**
++ * Returns whether this queue may have tasks queued.
++ *
++ * This operation is not atomic, but is MT-Safe.
++ *
++ * @return {@code true} if tasks may be queued, {@code false} otherwise
++ */
++ public boolean hasTasks() {
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ final ConcurrentLinkedQueue<T> queue = this.queues[i];
++
++ if (queue.peek() != null) {
++ return true;
++ }
++ }
++ return false;
++ }
++
++ /**
++ * Prevent further additions to this queue. Attempts to add after this call has completed (potentially during) will
++ * result in {@link IllegalStateException} being thrown.
++ *
++ * This operation is atomic with respect to other shutdown calls
++ *
++ *
++ * After this call has completed, regardless of return value, this queue will be shutdown.
++ *
++ * @return {@code true} if the queue was shutdown, {@code false} if it has shut down already
++ */
++ public boolean shutdown() {
++ return !this.shutdown.getAndSet(true);
++ }
++
++ public abstract static class PrioritizedTask {
++
++ protected final AtomicReference<PrioritizedTaskQueue> queue = new AtomicReference<>();
++
++ protected final AtomicInteger priority;
++
++ protected PrioritizedTask() {
++ this(PrioritizedTaskQueue.NORMAL_PRIORITY);
++ }
++
++ protected PrioritizedTask(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.priority = new AtomicInteger(priority);
++ }
++
++ /**
++ * Returns the current priority. Note that {@link PrioritizedTaskQueue#COMPLETING_PRIORITY} will be returned
++ * if this task is completing or has completed.
++ */
++ public final int getPriority() {
++ return this.priority.get();
++ }
++
++ /**
++ * Returns whether this task is scheduled to execute, or has been already executed.
++ */
++ public boolean isScheduled() {
++ return this.queue.get() != null;
++ }
++
++ final int tryComplete(final int minPriority) {
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return COMPLETING_PRIORITY;
++ }
++ if (curr > minPriority) {
++ // curr is lower priority
++ return curr;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, COMPLETING_PRIORITY))) {
++ return curr;
++ }
++ continue;
++ }
++ }
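++ // tryComplete is how poll() claims a task: it CASes the priority to COMPLETING_PRIORITY so a
++ // task that appears in multiple queues (after a priority change) is only ever executed once, and
++ // it returns the previous priority so poll() can discard stale queue entries.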
++
++ /**
++ * Forces this task to be completed.
++ * @return {@code true} if the task was cancelled, {@code false} if the task has already completed or is being completed.
++ */
++ public boolean cancel() {
++ return this.exchangePriorityVolatile(PrioritizedTaskQueue.COMPLETING_PRIORITY) != PrioritizedTaskQueue.COMPLETING_PRIORITY;
++ }
++
++ /**
++ * Attempts to raise the priority to the priority level specified.
++ * @param priority Priority specified
++ * @return {@code true} if successful, {@code false} otherwise.
++ */
++ public boolean raisePriority(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority");
++ }
++
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return false;
++ }
++ if (priority >= curr) {
++ return true;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
++ PrioritizedTaskQueue queue = this.queue.get();
++ if (queue != null) {
++ //noinspection unchecked
++ queue.queues[priority].add(this); // silently fail on shutdown
++ }
++ return true;
++ }
++ continue;
++ }
++ }
++
++ /**
++ * Attempts to set this task's priority level to the level specified.
++ * @param priority Specified priority level.
++ * @return {@code true} if successful, {@code false} if this task is completing or has completed.
++ */
++ public boolean updatePriority(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority");
++ }
++
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return false;
++ }
++ if (curr == priority) {
++ return true;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
++ PrioritizedTaskQueue queue = this.queue.get();
++ if (queue != null) {
++ //noinspection unchecked
++ queue.queues[priority].add(this); // silently fail on shutdown
++ }
++ return true;
++ }
++ continue;
++ }
++ }
++
++ void setQueue(final PrioritizedTaskQueue queue) {
++ this.queue.set(queue);
++ }
++
++ /* priority */
++
++ protected final int getPriorityVolatile() {
++ return this.priority.get();
++ }
++
++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
++ if (this.priority.compareAndSet(expect, update)) {
++ return expect;
++ }
++ return this.priority.get();
++ }
++
++ protected final int exchangePriorityVolatile(final int value) {
++ return this.priority.getAndSet(value);
++ }
++ }
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..99f49b5625cf51d6c97640553cf5c420bb6fdd36
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
+@@ -0,0 +1,255 @@
++package com.destroystokyo.paper.io;
++
++import com.mojang.logging.LogUtils;
++import org.slf4j.Logger;
++
++import java.util.concurrent.ConcurrentLinkedQueue;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.locks.LockSupport;
++
++@Deprecated(forRemoval = true)
++public class QueueExecutorThread<T extends PrioritizedTaskQueue.PrioritizedTask & Runnable> extends Thread {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected final PrioritizedTaskQueue<T> queue;
++ protected final long spinWaitTime;
++
++ protected volatile boolean closed;
++
++ protected final AtomicBoolean parked = new AtomicBoolean();
++
++ protected volatile ConcurrentLinkedQueue<Thread> flushQueue = new ConcurrentLinkedQueue<>();
++ protected volatile long flushCycles;
++
++ protected int lowestPriorityToPoll = PrioritizedTaskQueue.LOWEST_PRIORITY;
++
++ public int getLowestPriorityToPoll() {
++ return this.lowestPriorityToPoll;
++ }
++
++ public void setLowestPriorityToPoll(final int lowestPriorityToPoll) {
++ if (this.isAlive()) {
++ throw new IllegalStateException("Cannot set after starting");
++ }
++ this.lowestPriorityToPoll = lowestPriorityToPoll;
++ }
++
++ public QueueExecutorThread(final PrioritizedTaskQueue<T> queue) {
++ this(queue, (int)(1.e6)); // 1.0ms
++ }
++
++ public QueueExecutorThread(final PrioritizedTaskQueue<T> queue, final long spinWaitTime) { // in ns
++ this.queue = queue;
++ this.spinWaitTime = spinWaitTime;
++ }
++
++ @Override
++ public void run() {
++ final long spinWaitTime = this.spinWaitTime;
++ main_loop:
++ for (;;) {
++ this.pollTasks(true);
++
++ // spinwait
++
++ final long start = System.nanoTime();
++
++ for (;;) {
++ // If we are interrupted for any reason, park() will always return immediately. Clear so that we don't needlessly use cpu in such an event.
++ Thread.interrupted();
++ LockSupport.parkNanos("Spinwaiting on tasks", 1000L); // 1us
++
++ if (this.pollTasks(true)) {
++ // restart loop, found tasks
++ continue main_loop;
++ }
++
++ if (this.handleClose()) {
++ return; // we're done
++ }
++
++ if ((System.nanoTime() - start) >= spinWaitTime) {
++ break;
++ }
++ }
++
++ if (this.handleClose()) {
++ return;
++ }
++
++ this.parked.set(true);
++
++ // We need to poll tasks here to avoid a race condition where a thread queues a task before we set parked to true
++ // (i.e it will not notify us)
++ if (this.pollTasks(true)) {
++ this.parked.set(false);
++ continue;
++ }
++
++ if (this.handleClose()) {
++ return;
++ }
++
++ // we don't need to check parked before sleeping, but we do need to check parked in a do-while loop
++ // LockSupport.park() can fail for any reason
++ do {
++ Thread.interrupted();
++ LockSupport.park("Waiting on tasks");
++ } while (this.parked.get());
++ }
++ }
++
++ protected boolean handleClose() {
++ if (this.closed) {
++ this.pollTasks(true); // this ensures we've emptied the queue
++ this.handleFlushThreads(true);
++ return true;
++ }
++ return false;
++ }
++
++ protected boolean pollTasks(boolean flushTasks) {
++ Runnable task;
++ boolean ret = false;
++
++ while ((task = this.queue.poll(this.lowestPriorityToPoll)) != null) {
++ ret = true;
++ try {
++ task.run();
++ } catch (final Throwable throwable) {
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ LOGGER.error("Exception thrown from prioritized runnable task in thread '" + this.getName() + "': " + IOUtil.genericToString(task), throwable);
++ }
++ }
++
++ if (flushTasks) {
++ this.handleFlushThreads(false);
++ }
++
++ return ret;
++ }
++
++ protected void handleFlushThreads(final boolean shutdown) {
++ Thread parking;
++ ConcurrentLinkedQueue<Thread> flushQueue = this.flushQueue;
++ do {
++ ++flushCycles; // may be plain read opaque write
++ while ((parking = flushQueue.poll()) != null) {
++ LockSupport.unpark(parking);
++ }
++ } while (this.pollTasks(false));
++
++ if (shutdown) {
++ this.flushQueue = null;
++
++ // defend against a race condition where a flush thread double-checks right before we set to null
++ while ((parking = flushQueue.poll()) != null) {
++ LockSupport.unpark(parking);
++ }
++ }
++ }
++
++ /**
++ * Notifies this thread that a task has been added to its queue.
++ * @return {@code true} if this thread was waiting for tasks, {@code false} if it is executing tasks
++ */
++ public boolean notifyTasks() {
++ if (this.parked.get() && this.parked.getAndSet(false)) {
++ LockSupport.unpark(this);
++ return true;
++ }
++ return false;
++ }
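++ // The plain get() before getAndSet(false) keeps the common "not parked" path down to a single
++ // volatile read; only when the thread might actually be parked do we pay for the atomic swap
++ // and the unpark.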
++
++ protected void queueTask(final T task) {
++ this.queue.add(task);
++ this.notifyTasks();
++ }
++
++ /**
++ * Waits until this thread's queue is empty.
++ *
++ * @throws IllegalStateException If the current thread is {@code this} thread.
++ */
++ public void flush() {
++ final Thread currentThread = Thread.currentThread();
++
++ if (currentThread == this) {
++ // avoid deadlock
++ throw new IllegalStateException("Cannot flush the queue executor thread while on the queue executor thread");
++ }
++
++ // order is important
++
++ int successes = 0;
++ long lastCycle = -1L;
++
++ do {
++ final ConcurrentLinkedQueue<Thread> flushQueue = this.flushQueue;
++ if (flushQueue == null) {
++ return;
++ }
++
++ flushQueue.add(currentThread);
++
++ // double check flush queue
++ if (this.flushQueue == null) {
++ return;
++ }
++
++ final long currentCycle = this.flushCycles; // may be opaque read
++
++ if (currentCycle == lastCycle) {
++ Thread.yield();
++ continue;
++ }
++
++ // force response
++ this.parked.set(false);
++ LockSupport.unpark(this);
++
++ LockSupport.park("flushing queue executor thread");
++
++ // returns whether there are tasks queued, does not return whether there are tasks executing
++ // this is why we cycle twice through flush (we know a pollTask call is made after a flush cycle)
++ // we really only need to guarantee that the tasks this thread has queued have gone through, and can leave
++ // tasks queued concurrently that are unsynchronized with this thread as undefined behavior
++ if (this.queue.hasTasks()) {
++ successes = 0;
++ } else {
++ ++successes;
++ }
++
++ } while (successes != 2);
++
++ }
++
++ /**
++ * Closes this queue executor's queue and optionally waits for it to empty.
++ *
++ * If wait is {@code true}, then the queue will be empty by the time this call completes.
++ *
++ *
++ * This function is MT-Safe.
++ *
++ * @param wait If this call is to wait until the queue is empty
++ * @param killQueue Whether to shutdown this thread's queue
++ * @return whether this thread shut down the queue
++ */
++ public boolean close(final boolean wait, final boolean killQueue) {
++ boolean ret = !killQueue ? false : this.queue.shutdown();
++ this.closed = true;
++
++ // force thread to respond to the shutdown
++ this.parked.set(false);
++ LockSupport.unpark(this);
++
++ if (wait) {
++ this.flush();
++ }
++ return ret;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..e77972c4c264100ffdd824bfa2dac58dbbc6d678
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
+@@ -0,0 +1,1128 @@
++package io.papermc.paper.chunk;
++
++import com.destroystokyo.paper.util.misc.PlayerAreaMap;
++import com.destroystokyo.paper.util.misc.PooledLinkedHashSets;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.IntervalledCounter;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
++import it.unimi.dsi.fastutil.objects.Reference2IntOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket;
++import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket;
++import io.papermc.paper.util.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.*;
++import net.minecraft.util.Mth;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.LevelChunk;
++import org.apache.commons.lang3.mutable.MutableObject;
++import org.bukkit.craftbukkit.entity.CraftPlayer;
++import org.bukkit.entity.Player;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.TreeSet;
++import java.util.concurrent.atomic.AtomicInteger;
++
++public final class PlayerChunkLoader {
++
++ public static final int MIN_VIEW_DISTANCE = 2;
++ public static final int MAX_VIEW_DISTANCE = 32;
++
++ public static final int TICK_TICKET_LEVEL = 31;
++ public static final int LOADED_TICKET_LEVEL = 33;
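++ // These mirror vanilla's chunk ticket levels: level 33 keeps a chunk fully loaded, while
++ // level 31 brings it up to entity-ticking status.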
++
++ public static int getTickViewDistance(final Player player) {
++ return getTickViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getTickViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getTargetTickViewDistance();
++ }
++ return data.getTargetTickViewDistance();
++ }
++
++ public static int getLoadViewDistance(final Player player) {
++ return getLoadViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getLoadViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getLoadDistance();
++ }
++ return data.getLoadDistance();
++ }
++
++ public static int getSendViewDistance(final Player player) {
++ return getSendViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getSendViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getTargetSendDistance();
++ }
++ return data.getTargetSendViewDistance();
++ }
++
++ protected final ChunkMap chunkMap;
++ protected final Reference2ObjectLinkedOpenHashMap<ServerPlayer, PlayerLoaderData> playerMap = new Reference2ObjectLinkedOpenHashMap<>(512, 0.7f);
++ protected final ReferenceLinkedOpenHashSet<PlayerLoaderData> chunkSendQueue = new ReferenceLinkedOpenHashSet<>(512, 0.7f);
++
++ protected final TreeSet<PlayerLoaderData> chunkLoadQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
++ if (p1 == p2) {
++ return 0;
++ }
++
++ final ChunkPriorityHolder holder1 = p1.loadQueue.peekFirst();
++ final ChunkPriorityHolder holder2 = p2.loadQueue.peekFirst();
++
++ final int priorityCompare = Double.compare(holder1 == null ? Double.MAX_VALUE : holder1.priority, holder2 == null ? Double.MAX_VALUE : holder2.priority);
++
++ final int lastLoadTimeCompare = Long.compare(p1.lastChunkLoad - p2.lastChunkLoad, 0);
++
++ if ((holder1 == null || holder2 == null || lastLoadTimeCompare == 0 || holder1.priority < 0.0 || holder2.priority < 0.0) && priorityCompare != 0) {
++ return priorityCompare;
++ }
++
++ if (lastLoadTimeCompare != 0) {
++ return lastLoadTimeCompare;
++ }
++
++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
++
++ if (idCompare != 0) {
++ return idCompare;
++ }
++
++ // last resort
++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
++ });
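++ // The comparator above orders players primarily by the priority of their next queued chunk load
++ // and by how recently they were last served a chunk, then by entity id; identity hash code is the
++ // final tie-breaker so the ordering stays total for the TreeSet.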
++
++ protected final TreeSet<PlayerLoaderData> chunkSendWaitQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
++ if (p1 == p2) {
++ return 0;
++ }
++
++ final int timeCompare = Long.compare(p1.nextChunkSendTarget - p2.nextChunkSendTarget, 0);
++ if (timeCompare != 0) {
++ return timeCompare;
++ }
++
++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
++
++ if (idCompare != 0) {
++ return idCompare;
++ }
++
++ // last resort
++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
++ });
++
++
++ // no throttling is applied below this VD for loading
++
++ /**
++ * The chunks to be sent to players, provided they're send-ready. Send-ready means the chunk and its 1 radius neighbours are loaded.
++ */
++ public final PlayerAreaMap broadcastMap;
++
++ /**
++ * The chunks to be brought up to send-ready status. Send-ready means the chunk and its 1 radius neighbours are loaded.
++ */
++ public final PlayerAreaMap loadMap;
++
++ /**
++ * Area map used only to remove tickets for send-ready chunks. Its view distance is always the load view distance + 1. Thus,
++ * this map always represents the chunks we are actually going to load.
++ */
++ public final PlayerAreaMap loadTicketCleanup;
++
++ /**
++ * The chunks to be brought to ticking level. Each chunk must have 2 radius neighbours loaded before this can happen.
++ */
++ public final PlayerAreaMap tickMap;
++
++ /**
++ * -1 if defaulting to [load distance], else always in [2, load distance]
++ */
++ protected int rawSendDistance = -1;
++
++ /**
++ * -1 if defaulting to [tick view distance + 1], else always in [tick view distance + 1, 32 + 1]
++ */
++ protected int rawLoadDistance = -1;
++
++ /**
++ * Never -1, always in [2, 32]
++ */
++ protected int rawTickDistance = -1;
++
++ // methods to bridge for API
++
++ public int getTargetTickViewDistance() {
++ return this.getTickDistance();
++ }
++
++ public void setTargetTickViewDistance(final int distance) {
++ this.setTickDistance(distance);
++ }
++
++ public int getTargetNoTickViewDistance() {
++ return this.getLoadDistance() - 1;
++ }
++
++ public void setTargetNoTickViewDistance(final int distance) {
++ this.setLoadDistance(distance == -1 ? -1 : distance + 1);
++ }
++
++ public int getTargetSendDistance() {
++ return this.rawSendDistance == -1 ? this.getLoadDistance() : this.rawSendDistance;
++ }
++
++ public void setTargetSendDistance(final int distance) {
++ this.setSendDistance(distance);
++ }
++
++ // internal methods
++
++ public int getSendDistance() {
++ final int loadDistance = this.getLoadDistance();
++ return this.rawSendDistance == -1 ? loadDistance : Math.min(this.rawSendDistance, loadDistance);
++ }
++
++ public void setSendDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
++ }
++ this.rawSendDistance = distance;
++ }
++
++ public int getLoadDistance() {
++ final int tickDistance = this.getTickDistance();
++ return this.rawLoadDistance == -1 ? tickDistance + 1 : Math.max(tickDistance + 1, this.rawLoadDistance);
++ }
++
++ public void setLoadDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Load distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
++ }
++ this.rawLoadDistance = distance;
++ }
++
++ public int getTickDistance() {
++ return this.rawTickDistance;
++ }
++
++ public void setTickDistance(final int distance) {
++ if (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE) {
++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + ", got: " + distance);
++ }
++ this.rawTickDistance = distance;
++ }
++
++ /*
++ Players have 3 different types of view distance:
++ 1. Sending view distance
++ 2. Loading view distance
++ 3. Ticking view distance
++
++ But for configuration purposes (and API) there are:
++ 1. No-tick view distance
++ 2. Tick view distance
++ 3. Broadcast view distance
++
++ These aren't always the same as the types we represent internally.
++
++ Loading view distance is always max(no-tick + 1, tick + 1)
++ - no-tick has 1 added because clients need an extra radius to render chunks
++ - tick has 1 added because it needs an extra radius of chunks to load before they can be marked ticking
++
++ Loading view distance is defined as the radius of chunks that will be brought to send-ready status, which means
++ it loads chunks in radius load-view-distance + 1.
++
++ The maximum value for send view distance is the load view distance. API can set it lower.
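++
++ As an illustrative example (assuming a configured tick view distance of 10 and a no-tick view distance of 12):
++ load view distance = max(12 + 1, 10 + 1) = 13, player tickets are added out to radius 13 + 1 = 14,
++ and the send view distance defaults to 13 unless the API or the client's own view distance lowers it.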
++ */
++
++ public PlayerChunkLoader(final ChunkMap chunkMap, final PooledLinkedHashSets pooledHashSets) {
++ this.chunkMap = chunkMap;
++ this.broadcastMap = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ PlayerChunkLoader.this.onChunkLeave(player, rangeX, rangeZ);
++ });
++ this.loadMap = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ PlayerChunkLoader.this.isTargetedForPlayerLoad.remove(CoordinateUtils.getChunkKey(rangeX, rangeZ));
++ });
++ this.loadTicketCleanup = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
++ if (PlayerChunkLoader.this.chunkTicketTracker.remove(chunkPos.toLong())) {
++ --PlayerChunkLoader.this.concurrentChunkLoads;
++ }
++ });
++ this.tickMap = new PlayerAreaMap(pooledHashSets,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState.size() != 1) {
++ return;
++ }
++ LevelChunk chunk = PlayerChunkLoader.this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(rangeX, rangeZ);
++ if (chunk == null || !chunk.areNeighboursLoaded(2)) {
++ return;
++ }
++
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ },
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ });
++ }
++
++ protected final LongOpenHashSet isTargetedForPlayerLoad = new LongOpenHashSet();
++ protected final LongOpenHashSet chunkTicketTracker = new LongOpenHashSet();
++
++ public boolean isChunkNearPlayers(final int chunkX, final int chunkZ) {
++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
++
++ return playersInSendRange != null;
++ }
++
++ public void onChunkPostProcessing(final int chunkX, final int chunkZ) {
++ this.onChunkSendReady(chunkX, chunkZ);
++ }
++
++ private boolean chunkNeedsPostProcessing(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (chunk == null) {
++ return false;
++ }
++
++ final LevelChunk levelChunk = chunk.getSendingChunk();
++
++ return levelChunk != null && !levelChunk.isPostProcessingDone;
++ }
++
++ // returns whether the chunk is at a loaded stage that is ready to be sent to players
++ public boolean isChunkPlayerLoaded(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (chunk == null) {
++ return false;
++ }
++
++ final LevelChunk levelChunk = chunk.getSendingChunk();
++
++ return levelChunk != null && levelChunk.isPostProcessingDone && this.isTargetedForPlayerLoad.contains(key);
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) {
++ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ);
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++ if (data == null) {
++ return false;
++ }
++
++ return data.hasSentChunk(chunkX, chunkZ);
++ }
++
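++ // returns whether the chunk has been sent to the player but at least one direct neighbour has not (i.e. the chunk is on the border of the player's sent area)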
++ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++ if (data == null) {
++ return false;
++ }
++
++ final boolean center = data.hasSentChunk(chunkX, chunkZ);
++ if (!center) {
++ return false;
++ }
++
++ return !(data.hasSentChunk(chunkX - 1, chunkZ) && data.hasSentChunk(chunkX + 1, chunkZ) &&
++ data.hasSentChunk(chunkX, chunkZ - 1) && data.hasSentChunk(chunkX, chunkZ + 1));
++ }
++
++ protected int getMaxConcurrentChunkSends() {
++ return GlobalConfiguration.get().chunkLoading.maxConcurrentSends;
++ }
++
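++ // per-player concurrent load budget scaled by player count, capped by the global limit (a global value <= 1.0 disables the cap)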
++ protected int getMaxChunkLoads() {
++ double config = GlobalConfiguration.get().chunkLoading.playerMaxConcurrentLoads;
++ double max = GlobalConfiguration.get().chunkLoading.globalMaxConcurrentLoads;
++ return (int)Math.ceil(Math.min(config * MinecraftServer.getServer().getPlayerCount(), max <= 1.0 ? Double.MAX_VALUE : max));
++ }
++
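++ // the addends below convert the configured chunks-per-second send rates into per-send nanosecond intervals; a rate <= 1.0 disables that throttle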
++ protected long getTargetSendPerPlayerAddend() {
++ return GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate);
++ }
++
++ protected long getMaxSendAddend() {
++ return GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate);
++ }
++
++ public void onChunkPlayerTickReady(final int chunkX, final int chunkZ) {
++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ }
++
++ public void onChunkSendReady(final int chunkX, final int chunkZ) {
++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
++
++ if (playersInSendRange == null) {
++ return;
++ }
++
++ final Object[] rawData = playersInSendRange.getBackingSet();
++ for (int i = 0, len = rawData.length; i < len; ++i) {
++ final Object raw = rawData[i];
++
++ if (!(raw instanceof ServerPlayer)) {
++ continue;
++ }
++ this.onChunkSendReady((ServerPlayer)raw, chunkX, chunkZ);
++ }
++ }
++
++ public void onChunkSendReady(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++
++ if (data == null) {
++ return;
++ }
++
++ if (data.hasSentChunk(chunkX, chunkZ) || !this.isChunkPlayerLoaded(chunkX, chunkZ)) {
++ // if we don't have player tickets, then the load logic will pick this up and queue to send
++ return;
++ }
++
++ if (!data.chunksToBeSent.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ // don't queue to send, we don't want the chunk
++ return;
++ }
++
++ final long playerPos = this.broadcastMap.getLastCoordinate(player);
++ final int playerChunkX = CoordinateUtils.getChunkX(playerPos);
++ final int playerChunkZ = CoordinateUtils.getChunkZ(playerPos);
++ final int manhattanDistance = Math.abs(playerChunkX - chunkX) + Math.abs(playerChunkZ - chunkZ);
++
++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, 0.0);
++ data.sendQueue.add(holder);
++ }
++
++ public void onChunkLoad(final int chunkX, final int chunkZ) {
++ if (this.chunkTicketTracker.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ --this.concurrentChunkLoads;
++ }
++ }
++
++ public void onChunkLeave(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++
++ if (data == null) {
++ return;
++ }
++
++ data.unloadChunk(chunkX, chunkZ);
++ }
++
++ public void addPlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot add player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++ final PlayerLoaderData data = new PlayerLoaderData(player, this);
++ if (this.playerMap.putIfAbsent(player, data) == null) {
++ data.update();
++ }
++ }
++
++ public void removePlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot remove player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++
++ final PlayerLoaderData loaderData = this.playerMap.remove(player);
++ if (loaderData == null) {
++ return;
++ }
++ loaderData.remove();
++ this.chunkLoadQueue.remove(loaderData);
++ this.chunkSendQueue.remove(loaderData);
++ this.chunkSendWaitQueue.remove(loaderData);
++ synchronized (this.sendingChunkCounts) {
++ final int count = this.sendingChunkCounts.removeInt(loaderData);
++ if (count != 0) {
++ concurrentChunkSends.getAndAdd(-count);
++ }
++ }
++ }
++
++ public void updatePlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot update player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++ final PlayerLoaderData loaderData = this.playerMap.get(player);
++ if (loaderData != null) {
++ loaderData.update();
++ }
++ }
++
++ public PlayerLoaderData getData(final ServerPlayer player) {
++ return this.playerMap.get(player);
++ }
++
++ public void tick() {
++ TickThread.ensureTickThread("Cannot tick async");
++ for (final PlayerLoaderData data : this.playerMap.values()) {
++ data.update();
++ }
++ this.tickMidTick();
++ }
++
++ protected static final AtomicInteger concurrentChunkSends = new AtomicInteger();
++ protected final Reference2IntOpenHashMap sendingChunkCounts = new Reference2IntOpenHashMap<>();
++ private static long nextChunkSend;
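++ // chunk sends are throttled globally (nextChunkSend, from the global max send rate) and per player (nextChunkSendTarget, from the target per-player send rate), in addition to the concurrent send cap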
++ private void trySendChunks() {
++ final long time = System.nanoTime();
++ if (nextChunkSend - time > 0) {
++ return;
++ }
++ // drain entries from wait queue
++ while (!this.chunkSendWaitQueue.isEmpty()) {
++ final PlayerLoaderData data = this.chunkSendWaitQueue.first();
++
++ if (data.nextChunkSendTarget - time > 0) {
++ break;
++ }
++
++ this.chunkSendWaitQueue.pollFirst();
++
++ this.chunkSendQueue.add(data);
++ }
++
++ if (this.chunkSendQueue.isEmpty()) {
++ return;
++ }
++
++ final int maxSends = this.getMaxConcurrentChunkSends();
++ final long nextPlayerDeadline = this.getTargetSendPerPlayerAddend() + time;
++ for (;;) {
++ if (this.chunkSendQueue.isEmpty()) {
++ break;
++ }
++ final int currSends = concurrentChunkSends.get();
++ if (currSends >= maxSends) {
++ break;
++ }
++
++ if (!concurrentChunkSends.compareAndSet(currSends, currSends + 1)) {
++ continue;
++ }
++
++ // send chunk
++
++ final PlayerLoaderData data = this.chunkSendQueue.removeFirst();
++
++ final ChunkPriorityHolder queuedSend = data.sendQueue.pollFirst();
++ if (queuedSend == null) {
++ concurrentChunkSends.getAndDecrement(); // we never sent, so decrease
++ // stop iterating over players who have nothing to send
++ if (this.chunkSendQueue.isEmpty()) {
++ // nothing left
++ break;
++ }
++ continue;
++ }
++
++ if (!this.isChunkPlayerLoaded(queuedSend.chunkX, queuedSend.chunkZ)) {
++ throw new IllegalStateException();
++ }
++
++ data.nextChunkSendTarget = nextPlayerDeadline;
++ this.chunkSendWaitQueue.add(data);
++
++ synchronized (this.sendingChunkCounts) {
++ this.sendingChunkCounts.addTo(data, 1);
++ }
++
++ data.sendChunk(queuedSend.chunkX, queuedSend.chunkZ, () -> {
++ synchronized (this.sendingChunkCounts) {
++ final int count = this.sendingChunkCounts.getInt(data);
++ if (count == 0) {
++ // disconnected, so we don't need to decrement: it will be decremented for us
++ return;
++ }
++ if (count == 1) {
++ this.sendingChunkCounts.removeInt(data);
++ } else {
++ this.sendingChunkCounts.put(data, count - 1);
++ }
++ }
++
++ concurrentChunkSends.getAndDecrement();
++ });
++
++ nextChunkSend = this.getMaxSendAddend() + time;
++ if (nextChunkSend - time > 0) {
++ break;
++ }
++ }
++ }
++
++ protected int concurrentChunkLoads;
++ // this interval prevents bursting a lot of chunk loads
++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_SHORT = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
++ // this interval ensures the rate is kept between ticks correctly
++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_LONG = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
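++ // queues player tickets for the highest-priority chunks in each player's load queue, subject to the concurrent-load and load-rate limits above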
++ private void tryLoadChunks() {
++ if (this.chunkLoadQueue.isEmpty()) {
++ return;
++ }
++
++ final int maxLoads = this.getMaxChunkLoads();
++ final long time = System.nanoTime();
++ boolean updatedCounters = false;
++ for (;;) {
++ final PlayerLoaderData data = this.chunkLoadQueue.pollFirst();
++
++ data.lastChunkLoad = time;
++
++ final ChunkPriorityHolder queuedLoad = data.loadQueue.peekFirst();
++ if (queuedLoad == null) {
++ if (this.chunkLoadQueue.isEmpty()) {
++ break;
++ }
++ continue;
++ }
++
++ if (!updatedCounters) {
++ updatedCounters = true;
++ TICKET_ADDITION_COUNTER_SHORT.updateCurrentTime(time);
++ TICKET_ADDITION_COUNTER_LONG.updateCurrentTime(time);
++ data.ticketAdditionCounterShort.updateCurrentTime(time);
++ data.ticketAdditionCounterLong.updateCurrentTime(time);
++ }
++
++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // already loaded!
++ data.loadQueue.pollFirst(); // already loaded so we just skip
++ this.chunkLoadQueue.add(data);
++
++ // ensure the chunk is queued to send
++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
++ continue;
++ }
++
++ final long chunkKey = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
++
++ final double priority = queuedLoad.priority;
++ // while we do need to rate limit chunk loads, the logic for sending chunks requires that tickets are present.
++ // When chunks are already loaded (i.e. spawn chunks) but do not have this player's tickets, they would have to wait behind the
++ // load queue. To avoid this problem, we check early here whether tickets are actually required to load the chunk - if they
++ // aren't required, the chunk bypasses the limiter system.
++ boolean unloadedTargetChunk = false;
++ unloaded_check:
++ for (int dz = -1; dz <= 1; ++dz) {
++ for (int dx = -1; dx <= 1; ++dx) {
++ final int offX = queuedLoad.chunkX + dx;
++ final int offZ = queuedLoad.chunkZ + dz;
++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) == null) {
++ unloadedTargetChunk = true;
++ break unloaded_check;
++ }
++ }
++ }
++ if (unloadedTargetChunk && priority >= 0.0) {
++ // priority >= 0.0 implies rate limited chunks
++
++ final int currentChunkLoads = this.concurrentChunkLoads;
++ if (currentChunkLoads >= maxLoads || (GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate > 0 && (TICKET_ADDITION_COUNTER_SHORT.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate || TICKET_ADDITION_COUNTER_LONG.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate))
++ || (GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate > 0.0 && (data.ticketAdditionCounterShort.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate || data.ticketAdditionCounterLong.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate))) {
++ // don't poll, we didn't load it
++ this.chunkLoadQueue.add(data);
++ break;
++ }
++ }
++
++ // can only poll after we decide to load
++ data.loadQueue.pollFirst();
++
++ // now that we've polled we can re-add to load queue
++ this.chunkLoadQueue.add(data);
++
++ // add necessary tickets to load chunk up to send-ready
++ for (int dz = -1; dz <= 1; ++dz) {
++ for (int dx = -1; dx <= 1; ++dx) {
++ final int offX = queuedLoad.chunkX + dx;
++ final int offZ = queuedLoad.chunkZ + dz;
++ final ChunkPos chunkPos = new ChunkPos(offX, offZ);
++
++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) != null) {
++ continue;
++ }
++
++ if (priority > 0.0 && this.chunkTicketTracker.add(CoordinateUtils.getChunkKey(offX, offZ))) {
++ // won't reach here if unloadedTargetChunk is false
++ ++this.concurrentChunkLoads;
++ TICKET_ADDITION_COUNTER_SHORT.addTime(time);
++ TICKET_ADDITION_COUNTER_LONG.addTime(time);
++ data.ticketAdditionCounterShort.addTime(time);
++ data.ticketAdditionCounterLong.addTime(time);
++ }
++ }
++ }
++
++ // mark that we've added tickets here
++ this.isTargetedForPlayerLoad.add(chunkKey);
++
++ // it's possible all we needed was the player tickets to queue up the send.
++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // yup, all we needed.
++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
++ } else if (this.chunkNeedsPostProcessing(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // requires post processing
++ this.chunkMap.mainThreadExecutor.execute(() -> {
++ final long key = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
++ final ChunkHolder holder = PlayerChunkLoader.this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (holder == null) {
++ return;
++ }
++
++ final LevelChunk chunk = holder.getSendingChunk();
++
++ if (chunk != null && !chunk.isPostProcessingDone) {
++ chunk.postProcessGeneration();
++ }
++ });
++ }
++ }
++ }
++
++ public void tickMidTick() {
++ // try to send more chunks
++ this.trySendChunks();
++
++ // try to queue more chunks to load
++ this.tryLoadChunks();
++ }
++
++ static final class ChunkPriorityHolder {
++ public final int chunkX;
++ public final int chunkZ;
++ public final int manhattanDistanceToPlayer;
++ public final double priority;
++
++ public ChunkPriorityHolder(final int chunkX, final int chunkZ, final int manhattanDistanceToPlayer, final double priority) {
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.manhattanDistanceToPlayer = manhattanDistanceToPlayer;
++ this.priority = priority;
++ }
++ }
++
++ public static final class PlayerLoaderData {
++
++ protected static final float FOV = 110.0f;
++ protected static final double PRIORITISED_DISTANCE = 12.0 * 16.0;
++
++ // Player max sprint speed is approximately 8m/s
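++ // the threshold is compared against the per-tick horizontal movement squared, i.e. (10 blocks/s / 20 ticks/s)^2 = 0.25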
++ protected static final double LOOK_PRIORITY_SPEED_THRESHOLD = (10.0/20.0) * (10.0/20.0);
++ protected static final double LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD = 3.0f;
++
++ protected double lastLocX = Double.NEGATIVE_INFINITY;
++ protected double lastLocZ = Double.NEGATIVE_INFINITY;
++
++ protected int lastChunkX = Integer.MIN_VALUE;
++ protected int lastChunkZ = Integer.MIN_VALUE;
++
++ // this is corrected so that 0 is along the positive x-axis
++ protected float lastYaw = Float.NEGATIVE_INFINITY;
++
++ protected int lastSendDistance = Integer.MIN_VALUE;
++ protected int lastLoadDistance = Integer.MIN_VALUE;
++ protected int lastTickDistance = Integer.MIN_VALUE;
++ protected boolean usingLookingPriority;
++
++ protected final ServerPlayer player;
++ protected final PlayerChunkLoader loader;
++
++ // warning: code that modifies this field must be aware that the load queue inside PlayerChunkLoader uses this field
++ // in a comparator!
++ protected final ArrayDeque loadQueue = new ArrayDeque<>();
++ protected final LongOpenHashSet sentChunks = new LongOpenHashSet();
++ protected final LongOpenHashSet chunksToBeSent = new LongOpenHashSet();
++
++ protected final TreeSet sendQueue = new TreeSet<>((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
++ final int distanceCompare = Integer.compare(p1.manhattanDistanceToPlayer, p2.manhattanDistanceToPlayer);
++ if (distanceCompare != 0) {
++ return distanceCompare;
++ }
++
++ final int coordinateXCompare = Integer.compare(p1.chunkX, p2.chunkX);
++ if (coordinateXCompare != 0) {
++ return coordinateXCompare;
++ }
++
++ return Integer.compare(p1.chunkZ, p2.chunkZ);
++ });
++
++ protected int sendViewDistance = -1;
++ protected int loadViewDistance = -1;
++ protected int tickViewDistance = -1;
++
++ protected long nextChunkSendTarget;
++
++ // this interval prevents bursting a lot of chunk loads
++ protected final IntervalledCounter ticketAdditionCounterShort = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
++ // this ensures the rate is kept between ticks correctly
++ protected final IntervalledCounter ticketAdditionCounterLong = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
++
++ public long lastChunkLoad;
++
++ public PlayerLoaderData(final ServerPlayer player, final PlayerChunkLoader loader) {
++ this.player = player;
++ this.loader = loader;
++ }
++
++ // these view distance methods are for api
++ public int getTargetSendViewDistance() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ final int clientViewDistance = this.getClientViewDistance();
++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
++ return sendViewDistance;
++ }
++
++ public void setTargetSendViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send view distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.sendViewDistance = distance;
++ }
++
++ public int getTargetNoTickViewDistance() {
++ return (this.loadViewDistance == -1 ? this.getLoadDistance() : this.loadViewDistance) - 1;
++ }
++
++ public void setTargetNoTickViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("Simulation distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
++ }
++ this.loadViewDistance = distance == -1 ? -1 : distance + 1;
++ }
++
++ public int getTargetTickViewDistance() {
++ return this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ }
++
++ public void setTargetTickViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
++ }
++ this.tickViewDistance = distance;
++ }
++
++ protected int getLoadDistance() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++
++ return Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ }
++
++ public boolean hasSentChunk(final int chunkX, final int chunkZ) {
++ return this.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ public void sendChunk(final int chunkX, final int chunkZ, final Runnable onChunkSend) {
++ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded
++ this.player.connection.connection.execute(onChunkSend);
++ } else {
++ throw new IllegalStateException();
++ }
++ }
++
++ public void unloadChunk(final int chunkX, final int chunkZ) {
++ if (this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded
++ }
++ }
++
++ protected static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ,
++ final int sendRadius) {
++ // expect sendRadius to be = 1 + target viewable radius
++ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius);
++ }
++
++ protected static boolean triangleIntersects(final double p1x, final double p1z, // triangle point
++ final double p2x, final double p2z, // triangle point
++ final double p3x, final double p3z, // triangle point
++
++ final double targetX, final double targetZ) { // point
++ // from barycentric coordinates:
++ // targetX = a*p1x + b*p2x + c*p3x
++ // targetZ = a*p1z + b*p2z + c*p3z
++ // 1.0 = a*1.0 + b*1.0 + c*1.0
++ // where a, b, c >= 0.0
++ // so, if any of a, b, c are less-than zero then there is no intersection.
++
++ // d = ((p2z - p3z)(p1x - p3x) + (p3x - p2x)(p1z - p3z))
++ // a = ((p2z - p3z)(targetX - p3x) + (p3x - p2x)(targetZ - p3z)) / d
++ // b = ((p3z - p1z)(targetX - p3x) + (p1x - p3x)(targetZ - p3z)) / d
++ // c = 1.0 - a - b
++
++ final double d = (p2z - p3z)*(p1x - p3x) + (p3x - p2x)*(p1z - p3z);
++ final double a = ((p2z - p3z)*(targetX - p3x) + (p3x - p2x)*(targetZ - p3z)) / d;
++
++ if (a < 0.0 || a > 1.0) {
++ return false;
++ }
++
++ final double b = ((p3z - p1z)*(targetX - p3x) + (p1x - p3x)*(targetZ - p3z)) / d;
++ if (b < 0.0 || b > 1.0) {
++ return false;
++ }
++
++ final double c = 1.0 - a - b;
++
++ return c >= 0.0 && c <= 1.0;
++ }
++
++ public void remove() {
++ this.loader.broadcastMap.remove(this.player);
++ this.loader.loadMap.remove(this.player);
++ this.loader.loadTicketCleanup.remove(this.player);
++ this.loader.tickMap.remove(this.player);
++ }
++
++ protected int getClientViewDistance() {
++ return this.player.clientViewDistance == null ? -1 : Math.max(0, this.player.clientViewDistance.intValue());
++ }
++
++ public void update() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ // load view cannot be less-than tick view + 1
++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ // send view cannot be greater-than load view
++ final int clientViewDistance = this.getClientViewDistance();
++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
++
++ final double posX = this.player.getX();
++ final double posZ = this.player.getZ();
++ final float yaw = MCUtil.normalizeYaw(this.player.getYRot() + 90.0f); // mc yaw 0 is along the positive z axis, but obviously this is really dumb - offset so we are at positive x-axis
++
++ // in general, we really only want to prioritise chunks in front if we know we're moving pretty fast into them.
++ final boolean useLookPriority = GlobalConfiguration.get().chunkLoading.enableFrustumPriority && (this.player.getDeltaMovement().horizontalDistanceSqr() > LOOK_PRIORITY_SPEED_THRESHOLD ||
++ this.player.getAbilities().flying);
++
++ // make sure we're in the send queue
++ this.loader.chunkSendWaitQueue.add(this);
++
++ if (
++ // has view distance stayed the same?
++ sendViewDistance == this.lastSendDistance
++ && loadViewDistance == this.lastLoadDistance
++ && tickViewDistance == this.lastTickDistance
++
++ && (this.usingLookingPriority ? (
++ // has our block stayed the same (this also accounts for chunk change)?
++ Mth.floor(this.lastLocX) == Mth.floor(posX)
++ && Mth.floor(this.lastLocZ) == Mth.floor(posZ)
++ ) : (
++ // has our chunk stayed the same
++ (Mth.floor(this.lastLocX) >> 4) == (Mth.floor(posX) >> 4)
++ && (Mth.floor(this.lastLocZ) >> 4) == (Mth.floor(posZ) >> 4)
++ ))
++
++ // has our decision about look priority changed?
++ && this.usingLookingPriority == useLookPriority
++
++ // if we are currently using look priority, has our yaw stayed within recalc threshold?
++ && (!this.usingLookingPriority || Math.abs(yaw - this.lastYaw) <= LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD)
++ ) {
++ // nothing we care about changed, so we're not re-calculating
++ return;
++ }
++
++ final int centerChunkX = Mth.floor(posX) >> 4;
++ final int centerChunkZ = Mth.floor(posZ) >> 4;
++
++ final boolean needsChunkCenterUpdate = (centerChunkX != this.lastChunkX) || (centerChunkZ != this.lastChunkZ);
++ this.loader.broadcastMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, sendViewDistance);
++ this.loader.loadMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance);
++ this.loader.loadTicketCleanup.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance + 1);
++ this.loader.tickMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, tickViewDistance);
++
++ if (sendViewDistance != this.lastSendDistance) {
++ // update the view radius for client
++ // note that this should be after the map calls because the client won't expect unload calls for chunks outside of its view distance
++ // and it's possible we decreased the view distance here
++ this.player.connection.send(new ClientboundSetChunkCacheRadiusPacket(sendViewDistance));
++ }
++ if (tickViewDistance != this.lastTickDistance) {
++ this.player.connection.send(new ClientboundSetSimulationDistancePacket(tickViewDistance));
++ }
++
++ this.lastLocX = posX;
++ this.lastLocZ = posZ;
++ this.lastYaw = yaw;
++ this.lastSendDistance = sendViewDistance;
++ this.lastLoadDistance = loadViewDistance;
++ this.lastTickDistance = tickViewDistance;
++ this.usingLookingPriority = useLookPriority;
++
++ this.lastChunkX = centerChunkX;
++ this.lastChunkZ = centerChunkZ;
++
++ // points for player "view" triangle:
++
++ // obviously, the player pos is a vertex
++ final double p1x = posX;
++ final double p1z = posZ;
++
++ // to the left of the looking direction
++ final double p2x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
++ + p1x; // offset vector
++ final double p2z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
++ + p1z; // offset vector
++
++ // to the right of the looking direction
++ final double p3x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
++ + p1x; // offset vector
++ final double p3z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
++ + p1z; // offset vector
++
++ // now that we have all of our points, we can recalculate the load queue
++
++ final List loadQueue = new ArrayList<>();
++
++ // clear send queue, we are re-sorting
++ this.sendQueue.clear();
++ // clear chunk want set, vd/position might have changed
++ this.chunksToBeSent.clear();
++
++ final int searchViewDistance = Math.max(loadViewDistance, sendViewDistance);
++
++ for (int dx = -searchViewDistance; dx <= searchViewDistance; ++dx) {
++ for (int dz = -searchViewDistance; dz <= searchViewDistance; ++dz) {
++ final int chunkX = dx + centerChunkX;
++ final int chunkZ = dz + centerChunkZ;
++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz));
++ final boolean sendChunk = squareDistance <= sendViewDistance && wantChunkLoaded(centerChunkX, centerChunkZ, chunkX, chunkZ, sendViewDistance);
++
++ if (this.hasSentChunk(chunkX, chunkZ)) {
++ // already sent (which means it is also loaded)
++ if (!sendChunk) {
++ // have sent the chunk, but don't want it anymore
++ // unload it now
++ this.unloadChunk(chunkX, chunkZ);
++ }
++ continue;
++ }
++
++ final boolean loadChunk = squareDistance <= loadViewDistance;
++
++ final boolean prioritised = useLookPriority && triangleIntersects(
++ // prioritisation triangle
++ p1x, p1z, p2x, p2z, p3x, p3z,
++
++ // center of chunk
++ (double)((chunkX << 4) | 8), (double)((chunkZ << 4) | 8)
++ );
++
++ final int manhattanDistance = Math.abs(dx) + Math.abs(dz);
++
++ final double priority;
++
++ if (squareDistance <= GlobalConfiguration.get().chunkLoading.minLoadRadius) {
++ // priority should be negative, and we also want to order it from the center outwards,
++ // so we want (0,0) to be the smallest and (minLoadRadius,minLoadRadius) to be the greatest
++ priority = -((2 * GlobalConfiguration.get().chunkLoading.minLoadRadius + 1) - manhattanDistance);
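++ // e.g. with minLoadRadius = 2: manhattan distance 0 -> priority -5 (loads first), manhattan distance 4 -> priority -1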
++ } else {
++ if (prioritised) {
++ // we don't prioritise these chunks above others because we also want to make sure some chunks
++ // will be loaded if the player changes direction
++ priority = (double)manhattanDistance / 6.0;
++ } else {
++ priority = (double)manhattanDistance;
++ }
++ }
++
++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, priority);
++
++ if (!this.loader.isChunkPlayerLoaded(chunkX, chunkZ)) {
++ if (loadChunk) {
++ loadQueue.add(holder);
++ if (sendChunk) {
++ this.chunksToBeSent.add(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ }
++ } else {
++ // loaded but not sent: so queue it!
++ if (sendChunk) {
++ this.sendQueue.add(holder);
++ }
++ }
++ }
++ }
++
++ loadQueue.sort((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
++ return Double.compare(p1.priority, p2.priority);
++ });
++
++ // we're modifying loadQueue, must remove
++ this.loader.chunkLoadQueue.remove(this);
++
++ this.loadQueue.clear();
++ this.loadQueue.addAll(loadQueue);
++
++ // must re-add
++ this.loader.chunkLoadQueue.add(this);
++
++ // update the chunk center
++ // this must be done last so that the client does not ignore any of our unload chunk packets
++ if (needsChunkCenterUpdate) {
++ this.player.connection.send(new ClientboundSetChunkCacheCenterPacket(centerChunkX, centerChunkZ));
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+index 8a5e93961dac4d87c81c0e70b6f4124a1f1d2556..0dc94dec1317b3f86d38074c6cbe41ab828cab1d 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
++++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+@@ -31,191 +31,41 @@ public final class ChunkSystem {
+ }
+
+ public static void scheduleChunkTask(final ServerLevel level, final int chunkX, final int chunkZ, final Runnable run, final PrioritisedExecutor.Priority priority) {
+- level.chunkSource.mainThreadProcessor.execute(run);
++ level.chunkTaskScheduler.scheduleChunkTask(chunkX, chunkZ, run, priority); // Paper - rewrite chunk system
+ }
+
+ public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final boolean gen,
+ final ChunkStatus toStatus, final boolean addTicket, final PrioritisedExecutor.Priority priority,
+ final Consumer onComplete) {
+- if (gen) {
+- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+- return;
+- }
+- scheduleChunkLoad(level, chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> {
+- if (chunk == null) {
+- onComplete.accept(null);
+- } else {
+- if (chunk.getStatus().isOrAfter(toStatus)) {
+- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+- } else {
+- onComplete.accept(null);
+- }
+- }
+- });
++ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, gen, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
+ }
+
+- static final TicketType CHUNK_LOAD = TicketType.create("chunk_load", Long::compareTo);
+-
+- private static long chunkLoadCounter = 0L;
++ // Paper - rewrite chunk system
+ public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final ChunkStatus toStatus,
+ final boolean addTicket, final PrioritisedExecutor.Priority priority, final Consumer onComplete) {
+- if (!Bukkit.isPrimaryThread()) {
+- scheduleChunkTask(level, chunkX, chunkZ, () -> {
+- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+- }, priority);
+- return;
+- }
+-
+- final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
+- final Long chunkReference = addTicket ? Long.valueOf(++chunkLoadCounter) : null;
+- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
+-
+- if (addTicket) {
+- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
+- }
+- level.chunkSource.runDistanceManagerUpdates();
+-
+- final Consumer loadCallback = (final ChunkAccess chunk) -> {
+- try {
+- if (onComplete != null) {
+- onComplete.accept(chunk);
+- }
+- } catch (final ThreadDeath death) {
+- throw death;
+- } catch (final Throwable thr) {
+- LOGGER.error("Exception handling chunk load callback", thr);
+- SneakyThrow.sneaky(thr);
+- } finally {
+- if (addTicket) {
+- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos);
+- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
+- }
+- }
+- };
+-
+- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+-
+- if (holder == null || holder.getTicketLevel() > minLevel) {
+- loadCallback.accept(null);
+- return;
+- }
+-
+- final CompletableFuture> loadFuture = holder.getOrScheduleFuture(toStatus, level.chunkSource.chunkMap);
+-
+- if (loadFuture.isDone()) {
+- loadCallback.accept(loadFuture.join().left().orElse(null));
+- return;
+- }
+-
+- loadFuture.whenCompleteAsync((final Either either, final Throwable thr) -> {
+- if (thr != null) {
+- loadCallback.accept(null);
+- return;
+- }
+- loadCallback.accept(either.left().orElse(null));
+- }, (final Runnable r) -> {
+- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST);
+- });
++ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
+ }
+
+ public static void scheduleTickingState(final ServerLevel level, final int chunkX, final int chunkZ,
+ final ChunkHolder.FullChunkStatus toStatus, final boolean addTicket,
+ final PrioritisedExecutor.Priority priority, final Consumer onComplete) {
+- if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
+- throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status");
+- }
+-
+- if (!Bukkit.isPrimaryThread()) {
+- scheduleChunkTask(level, chunkX, chunkZ, () -> {
+- scheduleTickingState(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+- }, priority);
+- return;
+- }
+-
+- final int minLevel = 33 - (toStatus.ordinal() - 1);
+- final int radius = toStatus.ordinal() - 1;
+- final Long chunkReference = addTicket ? Long.valueOf(++chunkLoadCounter) : null;
+- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
+-
+- if (addTicket) {
+- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
+- }
+- level.chunkSource.runDistanceManagerUpdates();
+-
+- final Consumer loadCallback = (final LevelChunk chunk) -> {
+- try {
+- if (onComplete != null) {
+- onComplete.accept(chunk);
+- }
+- } catch (final ThreadDeath death) {
+- throw death;
+- } catch (final Throwable thr) {
+- LOGGER.error("Exception handling chunk load callback", thr);
+- SneakyThrow.sneaky(thr);
+- } finally {
+- if (addTicket) {
+- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos);
+- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
+- }
+- }
+- };
+-
+- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+-
+- if (holder == null || holder.getTicketLevel() > minLevel) {
+- loadCallback.accept(null);
+- return;
+- }
+-
+- final CompletableFuture> tickingState;
+- switch (toStatus) {
+- case BORDER: {
+- tickingState = holder.getFullChunkFuture();
+- break;
+- }
+- case TICKING: {
+- tickingState = holder.getTickingChunkFuture();
+- break;
+- }
+- case ENTITY_TICKING: {
+- tickingState = holder.getEntityTickingChunkFuture();
+- break;
+- }
+- default: {
+- throw new IllegalStateException("Cannot reach here");
+- }
+- }
+-
+- if (tickingState.isDone()) {
+- loadCallback.accept(tickingState.join().left().orElse(null));
+- return;
+- }
+-
+- tickingState.whenCompleteAsync((final Either either, final Throwable thr) -> {
+- if (thr != null) {
+- loadCallback.accept(null);
+- return;
+- }
+- loadCallback.accept(either.left().orElse(null));
+- }, (final Runnable r) -> {
+- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST);
+- });
++ level.chunkTaskScheduler.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
+ }
+
+ public static List getVisibleChunkHolders(final ServerLevel level) {
+- return new ArrayList<>(level.chunkSource.chunkMap.visibleChunkMap.values());
++ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system
+ }
+
+ public static List getUpdatingChunkHolders(final ServerLevel level) {
+- return new ArrayList<>(level.chunkSource.chunkMap.updatingChunkMap.values());
++ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system
+ }
+
+ public static int getVisibleChunkHolderCount(final ServerLevel level) {
+- return level.chunkSource.chunkMap.visibleChunkMap.size();
++ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system
+ }
+
+ public static int getUpdatingChunkHolderCount(final ServerLevel level) {
+- return level.chunkSource.chunkMap.updatingChunkMap.size();
++ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system
+ }
+
+ public static boolean hasAnyChunkHolders(final ServerLevel level) {
+@@ -269,23 +119,15 @@ public final class ChunkSystem {
+ }
+
+ public static int getSendViewDistance(final ServerPlayer player) {
+- return getLoadViewDistance(player);
++ return io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player);
+ }
+
+ public static int getLoadViewDistance(final ServerPlayer player) {
+- final ServerLevel level = player.getLevel();
+- if (level == null) {
+- return Bukkit.getViewDistance() + 1;
+- }
+- return level.chunkSource.chunkMap.getEffectiveViewDistance() + 1;
++ return io.papermc.paper.chunk.PlayerChunkLoader.getLoadViewDistance(player);
+ }
+
+ public static int getTickViewDistance(final ServerPlayer player) {
+- final ServerLevel level = player.getLevel();
+- if (level == null) {
+- return Bukkit.getSimulationDistance();
+- }
+- return level.chunkSource.chunkMap.distanceManager.getSimulationDistance();
++ return io.papermc.paper.chunk.PlayerChunkLoader.getTickViewDistance(player);
+ }
+
+ private ChunkSystem() {
+diff --git a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..61c170555c8854b102c640b0b6a615f9f732edbf
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+@@ -0,0 +1,839 @@
++package io.papermc.paper.chunk.system.entity;
++
++import com.destroystokyo.paper.util.maplist.EntityList;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.ints.Int2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Object2ReferenceOpenHashMap;
++import net.minecraft.core.BlockPos;
++import io.papermc.paper.chunk.system.ChunkSystem;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.util.AbortableIterationConsumer;
++import net.minecraft.util.Mth;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.entity.EntityType;
++import net.minecraft.world.level.entity.EntityInLevelCallback;
++import net.minecraft.world.level.entity.EntityTypeTest;
++import net.minecraft.world.level.entity.LevelCallback;
++import net.minecraft.world.level.entity.LevelEntityGetter;
++import net.minecraft.world.level.entity.Visibility;
++import net.minecraft.world.phys.AABB;
++import org.jetbrains.annotations.NotNull;
++import org.jetbrains.annotations.Nullable;
++import org.slf4j.Logger;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.NoSuchElementException;
++import java.util.UUID;
++import java.util.concurrent.locks.StampedLock;
++import java.util.function.Consumer;
++import java.util.function.Predicate;
++
++public final class EntityLookup implements LevelEntityGetter {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ protected static final int REGION_SHIFT = 5;
++ protected static final int REGION_MASK = (1 << REGION_SHIFT) - 1;
++ protected static final int REGION_SIZE = 1 << REGION_SHIFT;
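++ // entity chunk slices are grouped into regions of 32x32 chunks (REGION_SHIFT = 5) for the region lookup below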
++
++ public final ServerLevel world;
++
++ private final StampedLock stateLock = new StampedLock();
++ protected final Long2ObjectOpenHashMap regions = new Long2ObjectOpenHashMap<>(128, 0.5f);
++
++ private final int minSection; // inclusive
++ private final int maxSection; // inclusive
++ private final LevelCallback worldCallback;
++
++ private final StampedLock entityByLock = new StampedLock();
++ private final Int2ReferenceOpenHashMap entityById = new Int2ReferenceOpenHashMap<>();
++ private final Object2ReferenceOpenHashMap entityByUUID = new Object2ReferenceOpenHashMap<>();
++ private final EntityList accessibleEntities = new EntityList();
++
++ public EntityLookup(final ServerLevel world, final LevelCallback worldCallback) {
++ this.world = world;
++ this.minSection = WorldUtil.getMinSection(world);
++ this.maxSection = WorldUtil.getMaxSection(world);
++ this.worldCallback = worldCallback;
++ }
++
++ private static Entity maskNonAccessible(final Entity entity) {
++ if (entity == null) {
++ return null;
++ }
++ final Visibility visibility = EntityLookup.getEntityStatus(entity);
++ return visibility.isAccessible() ? entity : null;
++ }
++
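++ // lookups by id/uuid first attempt a StampedLock optimistic read and fall back to a full read lock only if a concurrent write invalidated it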
++ @Nullable
++ @Override
++ public Entity get(final int id) {
++ final long attempt = this.entityByLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final Entity ret = this.entityById.get(id);
++
++ if (this.entityByLock.validate(attempt)) {
++ return maskNonAccessible(ret);
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.entityByLock.readLock();
++ try {
++ return maskNonAccessible(this.entityById.get(id));
++ } finally {
++ this.entityByLock.tryUnlockRead();
++ }
++ }
++
++ @Nullable
++ @Override
++ public Entity get(final UUID id) {
++ final long attempt = this.entityByLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final Entity ret = this.entityByUUID.get(id);
++
++ if (this.entityByLock.validate(attempt)) {
++ return maskNonAccessible(ret);
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.entityByLock.readLock();
++ try {
++ return maskNonAccessible(this.entityByUUID.get(id));
++ } finally {
++ this.entityByLock.tryUnlockRead();
++ }
++ }
++
++ public boolean hasEntity(final UUID uuid) {
++ return this.get(uuid) != null;
++ }
++
++ public String getDebugInfo() {
++ return "count_id:" + this.entityById.size() + ",count_uuid:" + this.entityByUUID.size() + ",region_count:" + this.regions.size();
++ }
++
++ static final class ArrayIterable implements Iterable {
++
++ private final T[] array;
++ private final int off;
++ private final int length;
++
++ public ArrayIterable(final T[] array, final int off, final int length) {
++ this.array = array;
++ this.off = off;
++ this.length = length;
++ if (length > array.length) {
++ throw new IllegalArgumentException("Length must be no greater-than the array length");
++ }
++ }
++
++ @NotNull
++ @Override
++ public Iterator iterator() {
++ return new ArrayIterator<>(this.array, this.off, this.length);
++ }
++
++ static final class ArrayIterator implements Iterator {
++
++ private final T[] array;
++ private int off;
++ private final int length;
++
++ public ArrayIterator(final T[] array, final int off, final int length) {
++ this.array = array;
++ this.off = off;
++ this.length = length;
++ }
++
++ @Override
++ public boolean hasNext() {
++ return this.off < this.length;
++ }
++
++ @Override
++ public T next() {
++ if (this.off >= this.length) {
++ throw new NoSuchElementException();
++ }
++ return this.array[this.off++];
++ }
++
++ @Override
++ public void remove() {
++ throw new UnsupportedOperationException();
++ }
++ }
++ }
++
++ @Override
++ public Iterable getAll() {
++ return new ArrayIterable<>(this.accessibleEntities.getRawData(), 0, this.accessibleEntities.size());
++ }
++
++ @Override
++ public void get(final EntityTypeTest filter, final AbortableIterationConsumer action) {
++ for (final Entity entity : this.entityById.values()) {
++ final Visibility visibility = EntityLookup.getEntityStatus(entity);
++ if (!visibility.isAccessible()) {
++ continue;
++ }
++ final U casted = filter.tryCast(entity);
++ if (casted != null && action.accept(casted).shouldAbort()) {
++ break;
++ }
++ }
++ }
++
++ @Override
++ public void get(final AABB box, final Consumer action) {
++ List entities = new ArrayList<>();
++ this.getEntitiesWithoutDragonParts(null, box, entities, null);
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ action.accept(entities.get(i));
++ }
++ }
++
++ @Override
++ public void get(final EntityTypeTest filter, final AABB box, final AbortableIterationConsumer action) {
++ List entities = new ArrayList<>();
++ this.getEntitiesWithoutDragonParts(null, box, entities, null);
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ final U casted = filter.tryCast(entities.get(i));
++ if (casted != null && action.accept(casted).shouldAbort()) {
++ break;
++ }
++ }
++ }
++
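++ // transitions an entity between visibility states (hidden/accessible/ticking), firing the level callbacks in order,
++ // while blocking ticket updates and guarding against re-entrant status updates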
++ public void entityStatusChange(final Entity entity, final ChunkEntitySlices slices, final Visibility oldVisibility, final Visibility newVisibility, final boolean moved,
++ final boolean created, final boolean destroyed) {
++ TickThread.ensureTickThread(entity, "Entity status change must only happen on the main thread");
++
++ if (entity.updatingSectionStatus) {
++ // recursive status update
++ LOGGER.error("Cannot recursively update entity chunk status for entity " + entity, new Throwable());
++ return;
++ }
++
++ final boolean entityStatusUpdateBefore = slices == null ? false : slices.startPreventingStatusUpdates();
++
++ if (entityStatusUpdateBefore) {
++ LOGGER.error("Cannot update chunk status for entity " + entity + " since entity chunk (" + slices.chunkX + "," + slices.chunkZ + ") is receiving update", new Throwable());
++ return;
++ }
++
++ try {
++ final Boolean ticketBlockBefore = this.world.chunkTaskScheduler.chunkHolderManager.blockTicketUpdates();
++ try {
++ entity.updatingSectionStatus = true;
++ try {
++ if (created) {
++ EntityLookup.this.worldCallback.onCreated(entity);
++ }
++
++ if (oldVisibility == newVisibility) {
++ if (moved && newVisibility.isAccessible()) {
++ EntityLookup.this.worldCallback.onSectionChange(entity);
++ }
++ return;
++ }
++
++ if (newVisibility.ordinal() > oldVisibility.ordinal()) {
++ // status upgrade
++ if (!oldVisibility.isAccessible() && newVisibility.isAccessible()) {
++ this.accessibleEntities.add(entity);
++ EntityLookup.this.worldCallback.onTrackingStart(entity);
++ }
++
++ if (!oldVisibility.isTicking() && newVisibility.isTicking()) {
++ EntityLookup.this.worldCallback.onTickingStart(entity);
++ }
++ } else {
++ // status downgrade
++ if (oldVisibility.isTicking() && !newVisibility.isTicking()) {
++ EntityLookup.this.worldCallback.onTickingEnd(entity);
++ }
++
++ if (oldVisibility.isAccessible() && !newVisibility.isAccessible()) {
++ this.accessibleEntities.remove(entity);
++ EntityLookup.this.worldCallback.onTrackingEnd(entity);
++ }
++ }
++
++ if (moved && newVisibility.isAccessible()) {
++ EntityLookup.this.worldCallback.onSectionChange(entity);
++ }
++
++ if (destroyed) {
++ EntityLookup.this.worldCallback.onDestroyed(entity);
++ }
++ } finally {
++ entity.updatingSectionStatus = false;
++ }
++ } finally {
++ this.world.chunkTaskScheduler.chunkHolderManager.unblockTicketUpdates(ticketBlockBefore);
++ }
++ } finally {
++ if (slices != null) {
++ slices.stopPreventingStatusUpdates(false);
++ }
++ }
++ }
++
++ public void chunkStatusChange(final int x, final int z, final ChunkHolder.FullChunkStatus newStatus) {
++ this.getChunk(x, z).updateStatus(newStatus, this);
++ }
++
++ public void addLegacyChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), true);
++ }
++ }
++
++ public void addEntityChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), true);
++ }
++ }
++
++ public void addWorldGenChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), false);
++ }
++ }
++
++ public boolean addNewEntity(final Entity entity) {
++ return this.addEntity(entity, false);
++ }
++
++ public static Visibility getEntityStatus(final Entity entity) {
++ if (entity.isAlwaysTicking()) {
++ return Visibility.TICKING;
++ }
++ final ChunkHolder.FullChunkStatus entityStatus = entity.chunkStatus;
++ return Visibility.fromFullChunkStatus(entityStatus == null ? ChunkHolder.FullChunkStatus.INACCESSIBLE : entityStatus);
++ }
++
++ private boolean addEntity(final Entity entity, final boolean fromDisk) {
++ final BlockPos pos = entity.blockPosition();
++ final int sectionX = pos.getX() >> 4;
++ final int sectionY = Mth.clamp(pos.getY() >> 4, this.minSection, this.maxSection);
++ final int sectionZ = pos.getZ() >> 4;
++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot add entity off-main thread");
++
++ if (entity.isRemoved()) {
++ LOGGER.warn("Refusing to add removed entity: " + entity);
++ return false;
++ }
++
++ if (entity.updatingSectionStatus) {
++ LOGGER.warn("Entity " + entity + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable());
++ return false;
++ }
++
++ if (fromDisk) {
++ ChunkSystem.onEntityPreAdd(this.world, entity);
++ if (entity.isRemoved()) {
++ // removed from checkDupeUUID call
++ return false;
++ }
++ }
++
++ this.entityByLock.writeLock();
++ try {
++ if (this.entityById.containsKey(entity.getId())) {
++ LOGGER.warn("Entity id already exists: " + entity.getId() + ", mapped to " + this.entityById.get(entity.getId()) + ", can't add " + entity);
++ return false;
++ }
++ if (this.entityByUUID.containsKey(entity.getUUID())) {
++ LOGGER.warn("Entity uuid already exists: " + entity.getUUID() + ", mapped to " + this.entityByUUID.get(entity.getUUID()) + ", can't add " + entity);
++ return false;
++ }
++ this.entityById.put(entity.getId(), entity);
++ this.entityByUUID.put(entity.getUUID(), entity);
++ } finally {
++ this.entityByLock.tryUnlockWrite();
++ }
++
++ entity.sectionX = sectionX;
++ entity.sectionY = sectionY;
++ entity.sectionZ = sectionZ;
++ final ChunkEntitySlices slices = this.getOrCreateChunk(sectionX, sectionZ);
++ if (!slices.addEntity(entity, sectionY)) {
++ LOGGER.warn("Entity " + entity + " added to world '" + this.world.getWorld().getName() + "', but was already contained in entity chunk (" + sectionX + "," + sectionZ + ")");
++ }
++
++ entity.setLevelCallback(new EntityCallback(entity));
++
++ this.entityStatusChange(entity, slices, Visibility.HIDDEN, getEntityStatus(entity), false, !fromDisk, false);
++
++ return true;
++ }
++
++ private void removeEntity(final Entity entity) {
++ final int sectionX = entity.sectionX;
++ final int sectionY = entity.sectionY;
++ final int sectionZ = entity.sectionZ;
++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot remove entity off-main");
++ if (!entity.isRemoved()) {
++ throw new IllegalStateException("Only call Entity#setRemoved to remove an entity");
++ }
++ final ChunkEntitySlices slices = this.getChunk(sectionX, sectionZ);
++ // all entities should be in a chunk
++ if (slices == null) {
++ LOGGER.warn("Cannot remove entity " + entity + " from null entity slices (" + sectionX + "," + sectionZ + ")");
++ } else {
++ if (!slices.removeEntity(entity, sectionY)) {
++ LOGGER.warn("Failed to remove entity " + entity + " from entity slices (" + sectionX + "," + sectionZ + ")");
++ }
++ }
++ entity.sectionX = entity.sectionY = entity.sectionZ = Integer.MIN_VALUE;
++
++ this.entityByLock.writeLock();
++ try {
++ if (!this.entityById.remove(entity.getId(), entity)) {
++ LOGGER.warn("Failed to remove entity " + entity + " by id, current entity mapped: " + this.entityById.get(entity.getId()));
++ }
++ if (!this.entityByUUID.remove(entity.getUUID(), entity)) {
++ LOGGER.warn("Failed to remove entity " + entity + " by uuid, current entity mapped: " + this.entityByUUID.get(entity.getUUID()));
++ }
++ } finally {
++ this.entityByLock.tryUnlockWrite();
++ }
++ }
++
++ private ChunkEntitySlices moveEntity(final Entity entity) {
++ // ensure we own the entity
++ TickThread.ensureTickThread(entity, "Cannot move entity off-main");
++
++ final BlockPos newPos = entity.blockPosition();
++ final int newSectionX = newPos.getX() >> 4;
++ final int newSectionY = Mth.clamp(newPos.getY() >> 4, this.minSection, this.maxSection);
++ final int newSectionZ = newPos.getZ() >> 4;
++
++ if (newSectionX == entity.sectionX && newSectionY == entity.sectionY && newSectionZ == entity.sectionZ) {
++ return null;
++ }
++
++ // ensure the new section is owned by this tick thread
++ TickThread.ensureTickThread(this.world, newSectionX, newSectionZ, "Cannot move entity off-main");
++
++ // ensure the old section is owned by this tick thread
++ TickThread.ensureTickThread(this.world, entity.sectionX, entity.sectionZ, "Cannot move entity off-main");
++
++ final ChunkEntitySlices old = this.getChunk(entity.sectionX, entity.sectionZ);
++ final ChunkEntitySlices slices = this.getOrCreateChunk(newSectionX, newSectionZ);
++
++ if (!old.removeEntity(entity, entity.sectionY)) {
++ LOGGER.warn("Could not remove entity " + entity + " from its old chunk section (" + entity.sectionX + "," + entity.sectionY + "," + entity.sectionZ + ") since it was not contained in the section");
++ }
++
++ if (!slices.addEntity(entity, newSectionY)) {
++ LOGGER.warn("Could not add entity " + entity + " to its new chunk section (" + newSectionX + "," + newSectionY + "," + newSectionZ + ") as it is already contained in the section");
++ }
++
++ entity.sectionX = newSectionX;
++ entity.sectionY = newSectionY;
++ entity.sectionZ = newSectionZ;
++
++ return slices;
++ }
++
++ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
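++ // the +/- 2 block expansion below gives the search some slack, matching vanilla's entity section
++ // lookup margin, so entities whose bounding boxes extend slightly past the queried box are still found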
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntitiesWithoutDragonParts(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getHardCollidingEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getHardCollidingEntities(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public <T extends Entity> void getEntities(final EntityType<?> type, final AABB box, final List<? super T> into,
++ final Predicate<? super T> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(type, box, (List)into, (Predicate)predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public <T extends Entity> void getEntities(final Class<? extends T> clazz, final Entity except, final AABB box, final List<? super T> into,
++ final Predicate<? super T> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(clazz, except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void entitySectionLoad(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot load in entity section off-main");
++ synchronized (this) {
++ final ChunkEntitySlices curr = this.getChunk(chunkX, chunkZ);
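++ // if slices already exist for this chunk, merge them into the freshly loaded slices and swap the loaded copy in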
++ if (curr != null) {
++ this.removeChunk(chunkX, chunkZ);
++
++ curr.mergeInto(slices);
++
++ this.addChunk(chunkX, chunkZ, slices);
++ } else {
++ this.addChunk(chunkX, chunkZ, slices);
++ }
++ }
++ }
++
++ public void entitySectionUnload(final int chunkX, final int chunkZ) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot unload entity section off-main");
++ this.removeChunk(chunkX, chunkZ);
++ }
++
++ public ChunkEntitySlices getChunk(final int chunkX, final int chunkZ) {
++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ if (region == null) {
++ return null;
++ }
++
++ return region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT));
++ }
++
++ public ChunkEntitySlices getOrCreateChunk(final int chunkX, final int chunkZ) {
++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ ChunkEntitySlices ret;
++ if (region == null || (ret = region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT))) == null) {
++ // loadInEntityChunk will call addChunk for us
++ return this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(chunkX, chunkZ, true);
++ }
++
++ return ret;
++ }
++
++ public ChunkSlicesRegion getRegion(final int regionX, final int regionZ) {
++ final long key = CoordinateUtils.getChunkKey(regionX, regionZ);
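++ // optimistic read: if no write to the region map happened while we were reading (validate passes),
++ // return without taking the read lock; otherwise fall back to the full read lock below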
++ final long attempt = this.stateLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final ChunkSlicesRegion ret = this.regions.get(key);
++
++ if (this.stateLock.validate(attempt)) {
++ return ret;
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.stateLock.readLock();
++ try {
++ return this.regions.get(key);
++ } finally {
++ this.stateLock.tryUnlockRead();
++ }
++ }
++
++ private synchronized void removeChunk(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
++
++ final ChunkSlicesRegion region = this.regions.get(key);
++ final int remaining = region.remove(relIndex);
++
++ if (remaining == 0) {
++ this.stateLock.writeLock();
++ try {
++ this.regions.remove(key);
++ } finally {
++ this.stateLock.tryUnlockWrite();
++ }
++ }
++ }
++
++ public synchronized void addChunk(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
++
++ ChunkSlicesRegion region = this.regions.get(key);
++ if (region != null) {
++ region.add(relIndex, slices);
++ } else {
++ region = new ChunkSlicesRegion();
++ region.add(relIndex, slices);
++ this.stateLock.writeLock();
++ try {
++ this.regions.put(key, region);
++ } finally {
++ this.stateLock.tryUnlockWrite();
++ }
++ }
++ }
++
++ public static final class ChunkSlicesRegion {
++
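++ // a region holds a REGION_SIZE x REGION_SIZE grid of chunk slices, indexed by
++ // (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT)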
++ protected final ChunkEntitySlices[] slices = new ChunkEntitySlices[REGION_SIZE * REGION_SIZE];
++ protected int sliceCount;
++
++ public ChunkEntitySlices get(final int index) {
++ return this.slices[index];
++ }
++
++ public int remove(final int index) {
++ final ChunkEntitySlices slices = this.slices[index];
++ if (slices == null) {
++ throw new IllegalStateException();
++ }
++
++ this.slices[index] = null;
++
++ return --this.sliceCount;
++ }
++
++ public void add(final int index, final ChunkEntitySlices slices) {
++ final ChunkEntitySlices curr = this.slices[index];
++ if (curr != null) {
++ throw new IllegalStateException();
++ }
++
++ this.slices[index] = slices;
++
++ ++this.sliceCount;
++ }
++ }
++
++ private final class EntityCallback implements EntityInLevelCallback {
++
++ public final Entity entity;
++
++ public EntityCallback(final Entity entity) {
++ this.entity = entity;
++ }
++
++ @Override
++ public void onMove() {
++ final Entity entity = this.entity;
++ final Visibility oldVisibility = getEntityStatus(entity);
++ final ChunkEntitySlices newSlices = EntityLookup.this.moveEntity(this.entity);
++ if (newSlices == null) {
++ // no new section, so didn't change sections
++ return;
++ }
++ final Visibility newVisibility = getEntityStatus(entity);
++
++ EntityLookup.this.entityStatusChange(entity, newSlices, oldVisibility, newVisibility, true, false, false);
++ }
++
++ @Override
++ public void onRemove(final Entity.RemovalReason reason) {
++ final Entity entity = this.entity;
++ TickThread.ensureTickThread(entity, "Cannot remove entity off-main"); // Paper - rewrite chunk system
++ final Visibility tickingState = EntityLookup.getEntityStatus(entity);
++
++ EntityLookup.this.removeEntity(entity);
++
++ EntityLookup.this.entityStatusChange(entity, null, tickingState, Visibility.HIDDEN, false, false, reason.shouldDestroy());
++
++ this.entity.setLevelCallback(NoOpCallback.INSTANCE);
++ }
++ }
++
++ private static final class NoOpCallback implements EntityInLevelCallback {
++
++ public static final NoOpCallback INSTANCE = new NoOpCallback();
++
++ @Override
++ public void onMove() {}
++
++ @Override
++ public void onRemove(final Entity.RemovalReason reason) {}
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..a08cde4eefe879adcee7c4118bc38f98c5097ed0
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
+@@ -0,0 +1,1328 @@
++package io.papermc.paper.chunk.system.io;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedQueueExecutorThread;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.HashCommon;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.storage.RegionFile;
++import net.minecraft.world.level.chunk.storage.RegionFileStorage;
++import org.slf4j.Logger;
++import java.io.IOException;
++import java.lang.invoke.VarHandle;
++import java.util.concurrent.CompletableFuture;
++import java.util.concurrent.CompletionException;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.function.BiConsumer;
++import java.util.function.BiFunction;
++import java.util.function.Consumer;
++import java.util.function.Function;
++
++/**
++ * Prioritised RegionFile I/O executor, responsible for all RegionFile access.
++ * <p>
++ * All functions provided are MT-Safe; however, certain ordering constraints are recommended:
++ * <ul>
++ * <li>Chunk saves may not occur for unloaded chunks.</li>
++ * <li>Tasks must be scheduled on the chunk scheduler thread.</li>
++ * </ul>
++ * By following these constraints, no chunk data loss should occur with the exception of underlying I/O problems.
++ */
++public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ /**
++ * The kinds of region files controlled by the region file thread. Add more when needed, and ensure
++ * getControllerFor is updated.
++ */
++ public static enum RegionFileType {
++ CHUNK_DATA,
++ POI_DATA,
++ ENTITY_DATA;
++ }
++
++ protected static final RegionFileType[] CACHED_REGIONFILE_TYPES = RegionFileType.values();
++
++ private ChunkDataController getControllerFor(final ServerLevel world, final RegionFileType type) {
++ switch (type) {
++ case CHUNK_DATA:
++ return world.chunkDataControllerNew;
++ case POI_DATA:
++ return world.poiDataControllerNew;
++ case ENTITY_DATA:
++ return world.entityDataControllerNew;
++ default:
++ throw new IllegalStateException("Unknown controller type " + type);
++ }
++ }
++
++ /**
++ * Collects regionfile data for a certain chunk.
++ */
++ public static final class RegionFileData {
++
++ private final boolean[] hasResult = new boolean[CACHED_REGIONFILE_TYPES.length];
++ private final CompoundTag[] data = new CompoundTag[CACHED_REGIONFILE_TYPES.length];
++ private final Throwable[] throwables = new Throwable[CACHED_REGIONFILE_TYPES.length];
++
++ /**
++ * Sets the result associated with the specified regionfile type. Note that
++ * results can only be set once per regionfile type.
++ *
++ * @param type The regionfile type.
++ * @param data The result to set.
++ */
++ public void setData(final RegionFileType type, final CompoundTag data) {
++ final int index = type.ordinal();
++
++ if (this.hasResult[index]) {
++ throw new IllegalArgumentException("Result already exists for type " + type);
++ }
++ this.hasResult[index] = true;
++ this.data[index] = data;
++ }
++
++ /**
++ * Sets the result associated with the specified regionfile type. Note that
++ * results can only be set once per regionfile type.
++ *
++ * @param type The regionfile type.
++ * @param throwable The result to set.
++ */
++ public void setThrowable(final RegionFileType type, final Throwable throwable) {
++ final int index = type.ordinal();
++
++ if (this.hasResult[index]) {
++ throw new IllegalArgumentException("Result already exists for type " + type);
++ }
++ this.hasResult[index] = true;
++ this.throwables[index] = throwable;
++ }
++
++ /**
++ * Returns whether there is a result for the specified regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @return Whether a result exists for {@code type}.
++ */
++ public boolean hasResult(final RegionFileType type) {
++ return this.hasResult[type.ordinal()];
++ }
++
++ /**
++ * Returns the data result for the regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @throws IllegalArgumentException If the result has not been set for {@code type}.
++ * @return The data result for the specified type. If the result is a {@code Throwable},
++ * then returns {@code null}.
++ */
++ public CompoundTag getData(final RegionFileType type) {
++ final int index = type.ordinal();
++
++ if (!this.hasResult[index]) {
++ throw new IllegalArgumentException("Result does not exist for type " + type);
++ }
++
++ return this.data[index];
++ }
++
++ /**
++ * Returns the throwable result for the regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @throws IllegalArgumentException If the result has not been set for {@code type}.
++ * @return The throwable result for the specified type. If the result is a {@code CompoundTag},
++ * then returns {@code null}.
++ */
++ public Throwable getThrowable(final RegionFileType type) {
++ final int index = type.ordinal();
++
++ if (!this.hasResult[index]) {
++ throw new IllegalArgumentException("Result does not exist for type " + type);
++ }
++
++ return this.throwables[index];
++ }
++ }
++
++ private static final Object INIT_LOCK = new Object();
++
++ static RegionFileIOThread[] threads;
++
++ /* needs to be consistent given a set of parameters */
++ static RegionFileIOThread selectThread(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ if (threads == null) {
++ throw new IllegalStateException("Threads not initialised");
++ }
++
++ final int regionX = chunkX >> 5;
++ final int regionZ = chunkZ >> 5;
++ final int typeOffset = type.ordinal();
++
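++ // the same (world, region, type) tuple always maps to the same thread, so all I/O for a given
++ // chunk's data of one type is serialised onto a single executor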
++ return threads[(System.identityHashCode(world) + regionX + regionZ + typeOffset) % threads.length];
++ }
++
++ /**
++ * Shuts down the I/O executor(s). Waits for all tasks to complete if specified.
++ * Tasks queued during this call might not be accepted, and tasks queued after will not be accepted.
++ *
++ * @param wait Whether to wait until all tasks have completed.
++ */
++ public static void close(final boolean wait) {
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ threads[i].close(false, true);
++ }
++ if (wait) {
++ RegionFileIOThread.flush();
++ }
++ }
++
++ public static long[] getExecutedTasks() {
++ final long[] ret = new long[threads.length];
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ ret[i] = threads[i].getTotalTasksExecuted();
++ }
++
++ return ret;
++ }
++
++ public static long[] getTasksScheduled() {
++ final long[] ret = new long[threads.length];
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ ret[i] = threads[i].getTotalTasksScheduled();
++ }
++ return ret;
++ }
++
++ public static void flush() {
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ threads[i].waitUntilAllExecuted();
++ }
++ }
++
++ public static void partialFlush(final int totalTasksRemaining) {
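++ // spin until the number of scheduled-but-unexecuted tasks across all I/O threads drops to the
++ // given threshold, backing off linearly between samples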
++ long failures = 1L; // start out at 0.25ms
++
++ for (;;) {
++ final long[] executed = getExecutedTasks();
++ final long[] scheduled = getTasksScheduled();
++
++ long sum = 0;
++ for (int i = 0; i < executed.length; ++i) {
++ sum += scheduled[i] - executed[i];
++ }
++
++ if (sum <= totalTasksRemaining) {
++ break;
++ }
++
++ failures = ConcurrentUtil.linearLongBackoff(failures, 250_000L, 5_000_000L); // 0.25ms step, 5ms max
++ }
++ }
++
++ /**
++ * Inits the executor with the specified number of threads.
++ *
++ * @param threads Specified number of threads.
++ */
++ public static void init(final int threads) {
++ synchronized (INIT_LOCK) {
++ if (RegionFileIOThread.threads != null) {
++ throw new IllegalStateException("Already initialised threads");
++ }
++
++ RegionFileIOThread.threads = new RegionFileIOThread[threads];
++
++ for (int i = 0; i < threads; ++i) {
++ RegionFileIOThread.threads[i] = new RegionFileIOThread(i);
++ RegionFileIOThread.threads[i].start();
++ }
++ }
++ }
++
++ private RegionFileIOThread(final int threadNumber) {
++ super(new PrioritisedThreadedTaskQueue(), (int)(1.0e6)); // 1.0ms spinwait time
++ this.setName("RegionFile I/O Thread #" + threadNumber);
++ this.setPriority(Thread.NORM_PRIORITY - 2); // we keep priority close to normal because threads can wait on us
++ this.setUncaughtExceptionHandler((final Thread thread, final Throwable thr) -> {
++ LOGGER.error("Uncaught exception thrown from I/O thread, report this! Thread: " + thread.getName(), thr);
++ });
++ }
++
++ /**
++ * Returns whether the current thread is a regionfile I/O executor.
++ * @return Whether the current thread is a regionfile I/O executor.
++ */
++ public static boolean isRegionFileThread() {
++ return Thread.currentThread() instanceof RegionFileIOThread;
++ }
++
++ /**
++ * Returns the priority associated with blocking I/O based on the current thread. The goal is to prevent
++ * plugins from taking priority away from threads we consider crucial.
++ * @return The priority to use with blocking I/O on the current thread.
++ */
++ public static PrioritisedExecutor.Priority getIOBlockingPriorityForCurrentThread() {
++ if (TickThread.isTickThread()) {
++ return PrioritisedExecutor.Priority.BLOCKING;
++ }
++ return PrioritisedExecutor.Priority.HIGHEST;
++ }
++
++ /**
++ * Returns the current {@code CompoundTag} pending for write for the specified chunk & regionfile type.
++ * Note that this does not copy the result, so do not modify the result returned.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ *
++ * @return The compound tag associated with the specified chunk, or {@code null} if no write is pending or the pending write is {@code null}.
++ */
++ public static CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.getPendingWriteInternal(world, chunkX, chunkZ, type);
++ }
++
++ CompoundTag getPendingWriteInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task == null) {
++ return null;
++ }
++
++ final CompoundTag ret = task.inProgressWrite;
++
++ return ret == ChunkDataTask.NOTHING_TO_WRITE ? null : ret;
++ }
++
++ /**
++ * Returns the priority for the specified regionfile type for the specified chunk.
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @return The priority for the chunk
++ */
++ public static PrioritisedExecutor.Priority getPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.getPriorityInternal(world, chunkX, chunkZ, type);
++ }
++
++ PrioritisedExecutor.Priority getPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task == null) {
++ return PrioritisedExecutor.Priority.COMPLETING;
++ }
++
++ return task.prioritisedTask.getPriority();
++ }
++
++ /**
++ * Sets the priority for all regionfile types for the specified chunk. Note that great care should
++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
++ * priorities.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.setPriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Sets the priority for the specified regionfile type for the specified chunk. Note that great care should
++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
++ * priorities.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.setPriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void setPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.setPriority(priority);
++ }
++ }
++
++ /**
++ * Raises the priority for all regionfile types for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.raisePriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Raises the priority for the specified regionfile type for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.raisePriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void raisePriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.raisePriority(priority);
++ }
++ }
++
++ /**
++ * Lowers the priority for all regionfile types for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.lowerPriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Lowers the priority for the specified regionfile type for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.lowerPriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void lowerPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.lowerPriority(priority);
++ }
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.</li>
++ * <li>Writes may be called concurrently, although only the "later" write will go through.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param data Chunk's data
++ * @param type The regionfile type to write to.
++ *
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type) {
++ RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, data, type, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.</li>
++ * <li>Writes may be called concurrently, although only the "later" write will go through.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param data Chunk's data
++ * @param type The regionfile type to write to.
++ * @param priority The minimum priority to schedule at.
++ *
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type, final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.scheduleSaveInternal(world, chunkX, chunkZ, data, type, priority);
++ }
++
++ void scheduleSaveInternal(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type, final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++
++ final boolean[] created = new boolean[1];
++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ final ChunkDataTask task = taskController.tasks.compute(key, (final ChunkCoordinate keyInMap, final ChunkDataTask taskRunning) -> {
++ if (taskRunning == null || taskRunning.failedWrite) {
++ // no task is scheduled or the previous write failed - meaning we need to overwrite it
++
++ // create task
++ final ChunkDataTask newTask = new ChunkDataTask(world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority);
++ newTask.inProgressWrite = data;
++ created[0] = true;
++
++ return newTask;
++ }
++
++ taskRunning.inProgressWrite = data;
++
++ return taskRunning;
++ });
++
++ if (created[0]) {
++ task.prioritisedTask.queue();
++ } else {
++ task.prioritisedTask.raisePriority(priority);
++ }
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
++ * for single load.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ */
++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Consumer<RegionFileData> onComplete, final boolean intendingToBlock) {
++ return RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
++ * for single load.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param priority The minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ */
++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final PrioritisedExecutor.Priority priority) {
++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, priority, CACHED_REGIONFILE_TYPES);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
++ * for single load.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param types The regionfile type(s) to load.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final RegionFileType... types) {
++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL, types);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
++ * for single load.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param types The regionfile type(s) to load.
++ * @param priority The minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final PrioritisedExecutor.Priority priority, final RegionFileType... types) {
++ if (types == null) {
++ throw new NullPointerException("Types cannot be null");
++ }
++ if (types.length == 0) {
++ throw new IllegalArgumentException("Types cannot be empty");
++ }
++
++ final RegionFileData ret = new RegionFileData();
++
++ final Cancellable[] reads = new CancellableRead[types.length];
++ final AtomicInteger completions = new AtomicInteger();
++ final int expectedCompletions = types.length;
++
++ for (int i = 0; i < expectedCompletions; ++i) {
++ final RegionFileType type = types[i];
++ reads[i] = RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type,
++ (final CompoundTag data, final Throwable throwable) -> {
++ if (throwable != null) {
++ ret.setThrowable(type, throwable);
++ } else {
++ ret.setData(type, data);
++ }
++
++ if (completions.incrementAndGet() == expectedCompletions) {
++ onComplete.accept(ret);
++ }
++ }, intendingToBlock, priority);
++ }
++
++ return new CancellableReads(reads);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
++ * {@code onComplete}.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++ final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock) {
++ return RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
++ * {@code onComplete}.
++ *
++ * <p>
++ * Impl notes:
++ * <ul>
++ * <li>The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.</li>
++ * </ul>
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param priority Minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++ final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.loadDataAsyncInternal(world, chunkX, chunkZ, type, onComplete, intendingToBlock, priority);
++ }
++
++ private static Boolean doesRegionFileExist(final int chunkX, final int chunkZ, final boolean intendingToBlock,
++ final ChunkDataController taskController) {
++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
++ if (intendingToBlock) {
++ return taskController.computeForRegionFile(chunkX, chunkZ, true, (final RegionFile file) -> {
++ if (file == null) { // null if no regionfile exists
++ return Boolean.FALSE;
++ }
++
++ return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
++ });
++ } else {
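++ // not intending to block: if the regionfile is not currently loaded we cannot cheaply tell whether
++ // the chunk exists, so assume it does and let the scheduled read find out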
++ return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> {
++ if (file == null) { // null if not loaded
++ return Boolean.TRUE;
++ }
++
++ return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
++ });
++ }
++ }
++
++ Cancellable loadDataAsyncInternal(final ServerLevel world, final int chunkX, final int chunkZ,
++ final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++
++ final ImmediateCallbackCompletion callbackInfo = new ImmediateCallbackCompletion();
++
++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ final BiFunction<ChunkCoordinate, ChunkDataTask, ChunkDataTask> compute = (final ChunkCoordinate keyInMap, final ChunkDataTask running) -> {
++ if (running == null) {
++ // not scheduled
++
++ if (callbackInfo.regionFileCalculation == null) {
++ // caller will compute this outside of compute(), to avoid holding the bin lock
++ callbackInfo.needsRegionFileTest = true;
++ return null;
++ }
++
++ if (callbackInfo.regionFileCalculation == Boolean.FALSE) {
++ // not on disk
++ callbackInfo.data = null;
++ callbackInfo.throwable = null;
++ callbackInfo.completeNow = true;
++ return null;
++ }
++
++ // set up task
++ final ChunkDataTask newTask = new ChunkDataTask(
++ world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority
++ );
++ newTask.inProgressRead = new RegionFileIOThread.InProgressRead();
++ newTask.inProgressRead.waiters.add(onComplete);
++
++ callbackInfo.tasksNeedsScheduling = true;
++ return newTask;
++ }
++
++ final CompoundTag pendingWrite = running.inProgressWrite;
++
++ if (pendingWrite == ChunkDataTask.NOTHING_TO_WRITE) {
++ // need to add to waiters here, because the regionfile thread will use compute() to lock and check for cancellations
++ if (!running.inProgressRead.addToWaiters(onComplete)) {
++ callbackInfo.data = running.inProgressRead.value;
++ callbackInfo.throwable = running.inProgressRead.throwable;
++ callbackInfo.completeNow = true;
++ }
++ return running;
++ }
++ // using the result sync here - don't bump priority
++
++ // at this stage we have to use the in progress write's data to avoid an order issue
++ callbackInfo.data = pendingWrite;
++ callbackInfo.throwable = null;
++ callbackInfo.completeNow = true;
++ return running;
++ };
++
++ ChunkDataTask curr = taskController.tasks.get(key);
++ if (curr == null) {
++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
++ }
++ ChunkDataTask ret = taskController.tasks.compute(key, compute);
++ if (callbackInfo.needsRegionFileTest) {
++ // curr was non-null above, but the task had completed by the time compute() ran - do the regionfile check now and retry
++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
++ // now it should be fine
++ ret = taskController.tasks.compute(key, compute);
++ }
++
++ // needs to be scheduled
++ if (callbackInfo.tasksNeedsScheduling) {
++ ret.prioritisedTask.queue();
++ } else if (callbackInfo.completeNow) {
++ try {
++ onComplete.accept(callbackInfo.data, callbackInfo.throwable);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(onComplete) + " synchronously failed to handle chunk data for task " + ret.toString(), thr);
++ }
++ } else {
++ // we're waiting on a task we didn't schedule, so raise its priority to what we want
++ ret.prioritisedTask.raisePriority(priority);
++ }
++
++ return new CancellableRead(onComplete, ret);
++ }
++
++ /**
++ * Schedules a load task to be executed asynchronously, and blocks on that task.
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param type Regionfile type
++ * @param priority Minimum priority to load the data at.
++ *
++ * @return The chunk data for the chunk. Note that a {@code null} result means the chunk or regionfile does not exist on disk.
++ *
++ * @throws IOException If the load fails for any reason
++ */
++ public static CompoundTag loadData(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) throws IOException {
++ final CompletableFuture<CompoundTag> ret = new CompletableFuture<>();
++
++ RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, (final CompoundTag compound, final Throwable thr) -> {
++ if (thr != null) {
++ ret.completeExceptionally(thr);
++ } else {
++ ret.complete(compound);
++ }
++ }, true, priority);
++
++ try {
++ return ret.join();
++ } catch (final CompletionException ex) {
++ throw new IOException(ex);
++ }
++ }
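++
++ // Example usage sketch for the save/load API above (illustrative only; world, coordinates and data
++ // are placeholders supplied by the caller):
++ //
++ // // queue an asynchronous write of serialised chunk data
++ // RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, data, RegionFileType.CHUNK_DATA);
++ //
++ // // read it back asynchronously; the callback may run synchronously or on the I/O thread
++ // RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, RegionFileType.CHUNK_DATA,
++ //     (tag, thr) -> { /* handle tag or thr */ }, false, PrioritisedExecutor.Priority.NORMAL);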
++
++ private static final class ImmediateCallbackCompletion {
++
++ public CompoundTag data;
++ public Throwable throwable;
++ public boolean completeNow;
++ public boolean tasksNeedsScheduling;
++ public boolean needsRegionFileTest;
++ public Boolean regionFileCalculation;
++
++ }
++
++ static final class CancellableRead implements Cancellable {
++
++ private BiConsumer<CompoundTag, Throwable> callback;
++ private RegionFileIOThread.ChunkDataTask task;
++
++ CancellableRead(final BiConsumer<CompoundTag, Throwable> callback, final RegionFileIOThread.ChunkDataTask task) {
++ this.callback = callback;
++ this.task = task;
++ }
++
++ @Override
++ public boolean cancel() {
++ final BiConsumer<CompoundTag, Throwable> callback = this.callback;
++ final RegionFileIOThread.ChunkDataTask task = this.task;
++
++ if (callback == null || task == null) {
++ return false;
++ }
++
++ this.callback = null;
++ this.task = null;
++
++ final RegionFileIOThread.InProgressRead read = task.inProgressRead;
++
++ // read can be null if no read was scheduled (i.e. no regionfile existed, or the chunk didn't exist in the regionfile)
++ return (read != null && read.waiters.remove(callback));
++ }
++ }
++
++ static final class CancellableReads implements Cancellable {
++
++ private Cancellable[] reads;
++
++ protected static final VarHandle READS_HANDLE = ConcurrentUtil.getVarHandle(CancellableReads.class, "reads", Cancellable[].class);
++
++ CancellableReads(final Cancellable[] reads) {
++ this.reads = reads;
++ }
++
++ @Override
++ public boolean cancel() {
++ final Cancellable[] reads = (Cancellable[])READS_HANDLE.getAndSet((CancellableReads)this, (Cancellable[])null);
++
++ if (reads == null) {
++ return false;
++ }
++
++ boolean ret = false;
++
++ for (final Cancellable read : reads) {
++ ret |= read.cancel();
++ }
++
++ return ret;
++ }
++ }
++
++ static final class InProgressRead {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ CompoundTag value;
++ Throwable throwable;
++ final MultiThreadedQueue<BiConsumer<CompoundTag, Throwable>> waiters = new MultiThreadedQueue<>();
++
++ // returns false if already completed (callback not invoked), true if the callback was added
++ boolean addToWaiters(final BiConsumer<CompoundTag, Throwable> callback) {
++ return this.waiters.add(callback);
++ }
++
++ void complete(final RegionFileIOThread.ChunkDataTask task, final CompoundTag value, final Throwable throwable) {
++ this.value = value;
++ this.throwable = throwable;
++
++ BiConsumer<CompoundTag, Throwable> consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ try {
++ consumer.accept(value, throwable);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(consumer) + " failed to handle chunk data for task " + task.toString(), thr);
++ }
++ }
++ }
++ }
++
++ /**
++ * Class exists to replace {@link Long} usages as keys inside non-fastutil hashtables. The hash for some Long {@code x}
++ * is defined as {@code (x >>> 32) ^ x}. Chunk keys as long values are defined as {@code ((chunkX & 0xFFFFFFFFL) | (chunkZ << 32))},
++ * which means the hashcode as a Long value will be {@code chunkX ^ chunkZ}. Given that most chunks are created within a radius around players,
++ * this will lead to many hash collisions. So, this class uses a better hashing algorithm so that usage of
++ * non-fastutil collections is not degraded.
++ */
++ public static final class ChunkCoordinate implements Comparable<ChunkCoordinate> {
++
++ public final long key;
++
++ public ChunkCoordinate(final long key) {
++ this.key = key;
++ }
++
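++ // Worked example of the collision this avoids: chunks (5, 3) and (3, 5) produce keys whose Long hashcode is
++ // 5 ^ 3 == 3 ^ 5 == 6, so both always land in the same bucket; HashCommon.mix spreads such keys apart.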
++ @Override
++ public int hashCode() {
++ return (int)HashCommon.mix(this.key);
++ }
++
++ @Override
++ public boolean equals(final Object obj) {
++ if (this == obj) {
++ return true;
++ }
++
++ if (!(obj instanceof ChunkCoordinate)) {
++ return false;
++ }
++
++ final ChunkCoordinate other = (ChunkCoordinate)obj;
++
++ return this.key == other.key;
++ }
++
++ // This class is intended for HashMap/ConcurrentHashMap usage, which do treeify bin nodes if the chain
++ // is too large. So we should implement compareTo to help.
++ @Override
++ public int compareTo(final RegionFileIOThread.ChunkCoordinate other) {
++ return Long.compare(this.key, other.key);
++ }
++
++ @Override
++ public String toString() {
++ return new ChunkPos(this.key).toString();
++ }
++ }
++
++ public static abstract class ChunkDataController {
++
++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding.
++ protected final ConcurrentHashMap<ChunkCoordinate, ChunkDataTask> tasks = new ConcurrentHashMap<>(8192, 0.10f);
++
++ public final RegionFileType type;
++
++ public ChunkDataController(final RegionFileType type) {
++ this.type = type;
++ }
++
++ public abstract RegionFileStorage getCache();
++
++ public abstract void writeData(final int chunkX, final int chunkZ, final CompoundTag compound) throws IOException;
++
++ public abstract CompoundTag readData(final int chunkX, final int chunkZ) throws IOException;
++
++ public boolean hasTasks() {
++ return !this.tasks.isEmpty();
++ }
++
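++ // Runs the given function while holding the lock of the chunk's regionfile. When existingOnly is set, the
++ // function may be handed null if no regionfile exists on disk, so callers must tolerate a null argument.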
++ public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
++ final RegionFileStorage cache = this.getCache();
++ final RegionFile regionFile;
++ synchronized (cache) {
++ try {
++ regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true);
++ } catch (final IOException ex) {
++ throw new RuntimeException(ex);
++ }
++ }
++
++ try {
++ return function.apply(regionFile);
++ } finally {
++ if (regionFile != null) {
++ regionFile.fileLock.unlock();
++ }
++ }
++ }
++
++ public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function) {
++ final RegionFileStorage cache = this.getCache();
++ final RegionFile regionFile;
++
++ synchronized (cache) {
++ regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ));
++ if (regionFile != null) {
++ regionFile.fileLock.lock();
++ }
++ }
++
++ try {
++ return function.apply(regionFile);
++ } finally {
++ if (regionFile != null) {
++ regionFile.fileLock.unlock();
++ }
++ }
++ }
++ }
++
++ static final class ChunkDataTask implements Runnable {
++
++ protected static final CompoundTag NOTHING_TO_WRITE = new CompoundTag();
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ RegionFileIOThread.InProgressRead inProgressRead;
++ volatile CompoundTag inProgressWrite = NOTHING_TO_WRITE; // only needs to be acquire/release
++
++ boolean failedWrite;
++
++ final ServerLevel world;
++ final int chunkX;
++ final int chunkZ;
++ final RegionFileIOThread.ChunkDataController taskController;
++
++ final PrioritisedExecutor.PrioritisedTask prioritisedTask;
++
++ /*
++ * IO thread will perform reads before writes for a given chunk x and z
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read is scheduled while scheduling write, take no special action and just schedule write
++ * If read is scheduled while scheduling read and no write is scheduled, chain the read task
++ *
++ *
++ * If write is scheduled while scheduling read, use the pending write data and ret immediately (so no read is scheduled)
++ * If write is scheduled while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them; however,
++ * it fails to properly propagate write failures, since later writes overwrite earlier ones
++ */
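++ // For example: if a write is already pending for this chunk and a read is then requested, the read is answered
++ // from the pending write data and never touches disk; two writes scheduled back to back simply replace
++ // inProgressWrite, and only the latest data is guaranteed to reach disk.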
++
++ public ChunkDataTask(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileIOThread.ChunkDataController taskController,
++ final PrioritisedExecutor executor, final PrioritisedExecutor.Priority priority) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.taskController = taskController;
++ this.prioritisedTask = executor.createTask(this, priority);
++ }
++
++ @Override
++ public String toString() {
++ return "Task for world: '" + this.world.getWorld().getName() + "' at (" + this.chunkX + "," + this.chunkZ +
++ ") type: " + this.taskController.type.name() + ", hash: " + this.hashCode();
++ }
++
++ @Override
++ public void run() {
++ final RegionFileIOThread.InProgressRead read = this.inProgressRead;
++ final ChunkCoordinate chunkKey = new ChunkCoordinate(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ));
++
++ if (read != null) {
++ final boolean[] canRead = new boolean[] { true };
++
++ if (read.waiters.isEmpty()) {
++ // cancelled read? go to task controller to confirm
++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++
++ if (!read.waiters.isEmpty()) { // as per usual IntelliJ is unable to figure out that there are concurrent accesses.
++ return valueInMap;
++ } else {
++ canRead[0] = false;
++ }
++
++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ // read is cancelled - and no write pending, so we're done
++ return;
++ }
++ // if there is a write in progress, we don't actually have to worry about waiters gaining new entries -
++ // the readers will just use the in progress write, so the value in canRead is good to use without
++ // further synchronisation.
++ }
++
++ if (canRead[0]) {
++ CompoundTag compound = null;
++ Throwable throwable = null;
++
++ try {
++ compound = this.taskController.readData(this.chunkX, this.chunkZ);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ throwable = thr;
++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
++ }
++ read.complete(this, compound, throwable);
++ }
++ }
++
++ CompoundTag write = this.inProgressWrite;
++
++ if (write == NOTHING_TO_WRITE) {
++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ return; // set the task value to null, indicating we're done
++ } // else: inProgressWrite changed, so now we have something to write
++ }
++
++ for (;;) {
++ write = this.inProgressWrite;
++ final CompoundTag dataWritten = write;
++
++ boolean failedWrite = false;
++
++ try {
++ this.taskController.writeData(this.chunkX, this.chunkZ, write);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ if (thr instanceof RegionFileStorage.RegionFileSizeException) {
++ final int maxSize = RegionFile.MAX_CHUNK_SIZE / (1024 * 1024);
++ LOGGER.error("Chunk at (" + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "' exceeds max size of " + maxSize + "MiB, it has been deleted from disk.");
++ } else {
++ failedWrite = thr instanceof IOException;
++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
++ }
++ }
++
++ final boolean finalFailWrite = failedWrite;
++ final boolean[] done = new boolean[] { false };
++
++ this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ if (valueInMap.inProgressWrite == dataWritten) {
++ valueInMap.failedWrite = finalFailWrite;
++ done[0] = true;
++ // keep the data in map if we failed the write so we can try to prevent data loss
++ return finalFailWrite ? valueInMap : null;
++ }
++ // different data than expected, means we need to retry write
++ return valueInMap;
++ });
++
++ if (done[0]) {
++ return;
++ }
++
++ // fetch & write new data
++ continue;
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
+@@ -0,0 +1,280 @@
++package io.papermc.paper.chunk.system.light;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.starlight.common.light.BlockStarLightEngine;
++import ca.spottedleaf.starlight.common.light.SkyStarLightEngine;
++import ca.spottedleaf.starlight.common.light.StarLightInterface;
++import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler;
++import io.papermc.paper.util.CoordinateUtils;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.shorts.ShortCollection;
++import it.unimi.dsi.fastutil.shorts.ShortOpenHashSet;
++import net.minecraft.core.BlockPos;
++import net.minecraft.core.SectionPos;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import java.util.ArrayList;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Set;
++import java.util.concurrent.CompletableFuture;
++import java.util.function.BooleanSupplier;
++
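++/**
++ * Per-chunk light work queue: block/section light changes and edge checks are batched by chunk, and each batch
++ * runs as a single prioritised task on the world's light executor, completing that chunk's onComplete future.
++ */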
++public final class LightQueue {
++
++ protected final Long2ObjectOpenHashMap<ChunkTasks> chunkTasks = new Long2ObjectOpenHashMap<>();
++ protected final StarLightInterface manager;
++ protected final ServerLevel world;
++
++ public LightQueue(final StarLightInterface manager) {
++ this.manager = manager;
++ this.world = ((ServerLevel)manager.getWorld());
++ }
++
++ public void lowerPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.lowerPriority(priority);
++ }
++ }
++
++ public void setPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.setPriority(priority);
++ }
++ }
++
++ public void raisePriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.raisePriority(priority);
++ }
++ }
++
++ public PrioritisedExecutor.Priority getPriority(final int chunkX, final int chunkZ) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ return task.getPriority();
++ }
++
++ return PrioritisedExecutor.Priority.COMPLETING;
++ }
++
++ public boolean isEmpty() {
++ synchronized (this) {
++ return this.chunkTasks.isEmpty();
++ }
++ }
++
++ public CompletableFuture<Void> queueBlockChange(final BlockPos pos) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++ tasks.changedPositions.add(pos.immutable());
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueSectionChange(final SectionPos pos, final boolean newEmptyValue) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ if (tasks.changedSectionSet == null) {
++ tasks.changedSectionSet = new Boolean[this.manager.maxSection - this.manager.minSection + 1];
++ }
++ tasks.changedSectionSet[pos.getY() - this.manager.minSection] = Boolean.valueOf(newEmptyValue);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkLightTask(final ChunkPos pos, final BooleanSupplier lightTask, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this, priority);
++ });
++ if (tasks.lightTasks == null) {
++ tasks.lightTasks = new ArrayList<>();
++ }
++ tasks.lightTasks.add(lightTask);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkSkylightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksSky;
++ if (queuedEdges == null) {
++ queuedEdges = tasks.queuedEdgeChecksSky = new ShortOpenHashSet();
++ }
++ queuedEdges.addAll(sections);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkBlocklightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
++ final ChunkTasks tasks;
++
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksBlock;
++ if (queuedEdges == null) {
++ queuedEdges = tasks.queuedEdgeChecksBlock = new ShortOpenHashSet();
++ }
++ queuedEdges.addAll(sections);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public void removeChunk(final ChunkPos pos) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.remove(CoordinateUtils.getChunkKey(pos));
++ }
++ if (tasks != null && tasks.cancel()) {
++ tasks.onComplete.complete(null);
++ }
++ }
++
++ protected static final class ChunkTasks implements Runnable {
++
++ final Set<BlockPos> changedPositions = new HashSet<>();
++ Boolean[] changedSectionSet;
++ ShortOpenHashSet queuedEdgeChecksSky;
++ ShortOpenHashSet queuedEdgeChecksBlock;
++ List<BooleanSupplier> lightTasks;
++
++ final CompletableFuture<Void> onComplete = new CompletableFuture<>();
++
++ public final long chunkCoordinate;
++ private final StarLightInterface lightEngine;
++ private final LightQueue queue;
++ private final PrioritisedExecutor.PrioritisedTask task;
++
++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue) {
++ this(chunkCoordinate, lightEngine, queue, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue,
++ final PrioritisedExecutor.Priority priority) {
++ this.chunkCoordinate = chunkCoordinate;
++ this.lightEngine = lightEngine;
++ this.queue = queue;
++ this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority);
++ }
++
++ public void schedule() {
++ this.task.queue();
++ }
++
++ public boolean cancel() {
++ return this.task.cancel();
++ }
++
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.task.getPriority();
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ this.task.lowerPriority(priority);
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ this.task.setPriority(priority);
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ this.task.raisePriority(priority);
++ }
++
++ @Override
++ public void run() {
++ final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
++ final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
++ try {
++ synchronized (this.queue) {
++ this.queue.chunkTasks.remove(this.chunkCoordinate);
++ }
++
++ boolean litChunk = false;
++ if (this.lightTasks != null) {
++ for (final BooleanSupplier run : this.lightTasks) {
++ if (run.getAsBoolean()) {
++ litChunk = true;
++ break;
++ }
++ }
++ }
++
++ final long coordinate = this.chunkCoordinate;
++ final int chunkX = CoordinateUtils.getChunkX(coordinate);
++ final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
++
++ final Set<BlockPos> positions = this.changedPositions;
++ final Boolean[] sectionChanges = this.changedSectionSet;
++
++ if (!litChunk) {
++ if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
++ skyEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
++ }
++ if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
++ blockEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
++ }
++
++ if (skyEngine != null && this.queuedEdgeChecksSky != null) {
++ skyEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksSky);
++ }
++ if (blockEngine != null && this.queuedEdgeChecksBlock != null) {
++ blockEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksBlock);
++ }
++ }
++
++ this.onComplete.complete(null);
++ } finally {
++ this.lightEngine.releaseSkyLightEngine(skyEngine);
++ this.lightEngine.releaseBlockLightEngine(blockEngine);
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..d72041aa814ff179e6e29a45dcd359a91d426d47
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
+@@ -0,0 +1,213 @@
++package io.papermc.paper.chunk.system.poi;
++
++import com.mojang.logging.LogUtils;
++import com.mojang.serialization.Codec;
++import com.mojang.serialization.DataResult;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import net.minecraft.SharedConstants;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.nbt.NbtOps;
++import net.minecraft.nbt.Tag;
++import net.minecraft.resources.RegistryOps;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.entity.ai.village.poi.PoiManager;
++import net.minecraft.world.entity.ai.village.poi.PoiSection;
++import org.slf4j.Logger;
++
++import java.util.Optional;
++
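++/**
++ * Per-chunk view of POI (point of interest) data: one lazily-created {@link PoiSection} per section Y, serialised
++ * and deserialised independently of the main chunk NBT via {@link #save()} and {@link #parse}.
++ */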
++public final class PoiChunk {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ public final ServerLevel world;
++ public final int chunkX;
++ public final int chunkZ;
++ public final int minSection;
++ public final int maxSection;
++
++ protected final PoiSection[] sections;
++
++ private boolean isDirty;
++ private boolean loaded;
++
++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection) {
++ this(world, chunkX, chunkZ, minSection, maxSection, new PoiSection[maxSection - minSection + 1]);
++ }
++
++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection, final PoiSection[] sections) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.minSection = minSection;
++ this.maxSection = maxSection;
++ this.sections = sections;
++ if (this.sections.length != (maxSection - minSection + 1)) {
++ throw new IllegalStateException("Incorrect length used, expected " + (maxSection - minSection + 1) + ", got " + this.sections.length);
++ }
++ }
++
++ public void load() {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Loading in poi chunk off-main");
++ if (this.loaded) {
++ return;
++ }
++ this.loaded = true;
++ this.world.chunkSource.getPoiManager().loadInPoiChunk(this);
++ }
++
++ public boolean isLoaded() {
++ return this.loaded;
++ }
++
++ public boolean isEmpty() {
++ for (final PoiSection section : this.sections) {
++ if (section != null && !section.isEmpty()) {
++ return false;
++ }
++ }
++
++ return true;
++ }
++
++ public PoiSection getOrCreateSection(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ final int idx = chunkY - this.minSection;
++ final PoiSection ret = this.sections[idx];
++ if (ret != null) {
++ return ret;
++ }
++
++ final PoiManager poiManager = this.world.getPoiManager();
++ final long key = CoordinateUtils.getChunkSectionKey(this.chunkX, chunkY, this.chunkZ);
++
++ return this.sections[idx] = new PoiSection(() -> {
++ poiManager.setDirty(key);
++ });
++ }
++ throw new IllegalArgumentException("chunkY is out of bounds, chunkY: " + chunkY + " outside [" + this.minSection + "," + this.maxSection + "]");
++ }
++
++ public PoiSection getSection(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ return this.sections[chunkY - this.minSection];
++ }
++ return null;
++ }
++
++ public Optional<PoiSection> getSectionForVanilla(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ final PoiSection ret = this.sections[chunkY - this.minSection];
++ return ret == null ? Optional.empty() : ret.noAllocateOptional;
++ }
++ return Optional.empty();
++ }
++
++ public boolean isDirty() {
++ return this.isDirty;
++ }
++
++ public void setDirty(final boolean dirty) {
++ this.isDirty = dirty;
++ }
++
++ // returns null if empty
++ public CompoundTag save() {
++ final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess);
++
++ final CompoundTag ret = new CompoundTag();
++ final CompoundTag sections = new CompoundTag();
++ ret.put("Sections", sections);
++
++ ret.putInt("DataVersion", SharedConstants.getCurrentVersion().getDataVersion().getVersion());
++
++ final ServerLevel world = this.world;
++ final PoiManager poiManager = world.getPoiManager();
++ final int chunkX = this.chunkX;
++ final int chunkZ = this.chunkZ;
++
++ for (int sectionY = this.minSection; sectionY <= this.maxSection; ++sectionY) {
++ final PoiSection chunk = this.sections[sectionY - this.minSection];
++ if (chunk == null || chunk.isEmpty()) {
++ continue;
++ }
++
++ final long key = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
++ // codecs are honestly such a fucking disaster. What the fuck is this trash?
++ final Codec<PoiSection> codec = PoiSection.codec(() -> {
++ poiManager.setDirty(key);
++ });
++
++ final DataResult<Tag> serializedResult = codec.encodeStart(registryOps, chunk);
++ final int finalSectionY = sectionY;
++ final Tag serialized = serializedResult.resultOrPartial((final String description) -> {
++ LOGGER.error("Failed to serialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
++ }).orElse(null);
++ if (serialized == null) {
++ // failed, should be logged from the resultOrPartial
++ continue;
++ }
++
++ sections.put(Integer.toString(sectionY), serialized);
++ }
++
++ return sections.isEmpty() ? null : ret;
++ }
++
++ public static PoiChunk empty(final ServerLevel world, final int chunkX, final int chunkZ) {
++ final PoiChunk ret = new PoiChunk(world, chunkX, chunkZ, WorldUtil.getMinSection(world), WorldUtil.getMaxSection(world));
++ ret.loaded = true;
++ return ret;
++ }
++
++ public static PoiChunk parse(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data) {
++ final PoiChunk ret = empty(world, chunkX, chunkZ);
++
++ final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess);
++
++ final CompoundTag sections = data.getCompound("Sections");
++
++ if (sections.isEmpty()) {
++ // nothing to parse
++ return ret;
++ }
++
++ final PoiManager poiManager = world.getPoiManager();
++
++ boolean readAnything = false;
++
++ for (int sectionY = ret.minSection; sectionY <= ret.maxSection; ++sectionY) {
++ final String key = Integer.toString(sectionY);
++ if (!sections.contains(key)) {
++ continue;
++ }
++
++ final long coordinateKey = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
++ // codecs are honestly such a fucking disaster. What the fuck is this trash?
++ final Codec<PoiSection> codec = PoiSection.codec(() -> {
++ poiManager.setDirty(coordinateKey);
++ });
++
++ final CompoundTag section = sections.getCompound(key);
++ final DataResult<PoiSection> deserializeResult = codec.parse(registryOps, section);
++ final int finalSectionY = sectionY;
++ final PoiSection deserialized = deserializeResult.resultOrPartial((final String description) -> {
++ LOGGER.error("Failed to deserialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
++ }).orElse(null);
++
++ if (deserialized == null || deserialized.isEmpty()) {
++ // completely empty, no point in storing this
++ continue;
++ }
++
++ readAnything = true;
++ ret.sections[sectionY - ret.minSection] = deserialized;
++ }
++
++ ret.loaded = !readAnything; // Set loaded to false if we read anything to ensure proper callbacks to PoiManager are made on #load
++
++ return ret;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..fb42d776f15f735fb59e972e00e2b512c23a8387
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+@@ -0,0 +1,121 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ImposterProtoChunk;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import java.lang.invoke.VarHandle;
++
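++/**
++ * Final progression step to {@link ChunkStatus#FULL}: unwraps an {@link ImposterProtoChunk} or converts the
++ * {@link ProtoChunk} into a {@link LevelChunk}, loads the entity chunk, and registers block entities and tick
++ * containers, mirroring Vanilla's protoChunkToFullChunk.
++ */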
++public final class ChunkFullTask extends ChunkProgressionTask implements Runnable {
++
++ protected final NewChunkHolder chunkHolder;
++ protected final ChunkAccess fromChunk;
++ protected final PrioritisedExecutor.PrioritisedTask convertToFullTask;
++
++ public ChunkFullTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final NewChunkHolder chunkHolder, final ChunkAccess fromChunk, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ this.chunkHolder = chunkHolder;
++ this.fromChunk = fromChunk;
++ this.convertToFullTask = scheduler.createChunkTask(chunkX, chunkZ, this, priority);
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.FULL;
++ }
++
++ @Override
++ public void run() {
++ // See Vanilla protoChunkToFullChunk for what this function should be doing
++ final LevelChunk chunk;
++ try {
++ if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) {
++ chunk = wrappedFull.getWrapped();
++ } else {
++ final ServerLevel world = this.world;
++ final ProtoChunk protoChunk = (ProtoChunk)this.fromChunk;
++ chunk = new LevelChunk(this.world, protoChunk, (final LevelChunk unused) -> {
++ ChunkMap.postLoadProtoChunk(world, protoChunk.getEntities());
++ });
++ }
++
++ chunk.setChunkHolder(this.scheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ)); // replaces setFullStatus
++ chunk.runPostLoad();
++ // Unlike Vanilla, we load the entity chunk here, as we load the NBT in empty status (unlike Vanilla)
++ // This brings entity addition back in line with older versions of the game
++ // Since we load the NBT in the empty status, this will never block for I/O
++ this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(this.chunkX, this.chunkZ, false);
++
++ // we don't need the entitiesInLevel trash, this system doesn't double run callbacks
++ chunk.setLoaded(true);
++ chunk.registerAllBlockEntitiesAfterLevelLoad();
++ chunk.registerTickContainerInLevel(this.world);
++ } catch (final Throwable throwable) {
++ this.complete(null, throwable);
++
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++ this.complete(chunk, null);
++ }
++
++ protected volatile boolean scheduled;
++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkFullTask.class, "scheduled", boolean.class);
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkFullTask)this, true)) {
++ throw new IllegalStateException("Cannot double call schedule()");
++ }
++ this.convertToFullTask.queue();
++ }
++
++ @Override
++ public void cancel() {
++ if (this.convertToFullTask.cancel()) {
++ this.complete(null, null);
++ }
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.convertToFullTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.raisePriority(priority);
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..748cc48c6c42c694d1c9b685e96fbe6d8337d3f3
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+@@ -0,0 +1,1211 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
++import co.aikar.timings.Timing;
++import com.google.common.collect.ImmutableList;
++import com.google.gson.JsonArray;
++import com.google.gson.JsonObject;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.misc.Delayed8WayDistancePropagator2D;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.longs.Long2IntLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2IntMap;
++import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.longs.LongArrayList;
++import it.unimi.dsi.fastutil.longs.LongIterator;
++import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.nbt.CompoundTag;
++import io.papermc.paper.chunk.system.ChunkSystem;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.Ticket;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.util.SortedArraySet;
++import net.minecraft.util.Unit;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import org.bukkit.plugin.Plugin;
++import org.slf4j.Logger;
++import java.io.IOException;
++import java.text.DecimalFormat;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Collection;
++import java.util.Collections;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Objects;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicReference;
++import java.util.concurrent.locks.LockSupport;
++import java.util.concurrent.locks.ReentrantLock;
++import java.util.function.Predicate;
++
++public final class ChunkHolderManager {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ public static final int FULL_LOADED_TICKET_LEVEL = 33;
++ public static final int BLOCK_TICKING_TICKET_LEVEL = 32;
++ public static final int ENTITY_TICKING_TICKET_LEVEL = 31;
++ public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive
++
++ private static final long NO_TIMEOUT_MARKER = -1L;
++
++ final ReentrantLock ticketLock = new ReentrantLock();
++
++ private final SWMRLong2ObjectHashTable<NewChunkHolder> chunkHolders = new SWMRLong2ObjectHashTable<>(16384, 0.25f);
++ private final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f);
++ // what a disaster of a name
++ // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
++ private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
++ private final ServerLevel world;
++ private final ChunkTaskScheduler taskScheduler;
++ private long currentTick;
++
++ private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
++ private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
++ if (c1 == c2) {
++ return 0;
++ }
++
++ final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
++
++ if (saveTickCompare != 0) {
++ return saveTickCompare;
++ }
++
++ final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
++ final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
++
++ if (coord1 == coord2) {
++ throw new IllegalStateException("Duplicate chunkholder in auto save queue");
++ }
++
++ return Long.compare(coord1, coord2);
++ });
++
++ public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) {
++ this.world = world;
++ this.taskScheduler = taskScheduler;
++ }
++
++ private long statusUpgradeId;
++
++ long getNextStatusUpgradeId() {
++ return ++this.statusUpgradeId;
++ }
++
++ public List<ChunkHolder> getOldChunkHolders() {
++ final List<NewChunkHolder> holders = this.getChunkHolders();
++ final List<ChunkHolder> ret = new ArrayList<>(holders.size());
++ for (final NewChunkHolder holder : holders) {
++ ret.add(holder.vanillaChunkHolder);
++ }
++ return ret;
++ }
++
++ public List<NewChunkHolder> getChunkHolders() {
++ final List<NewChunkHolder> ret = new ArrayList<>(this.chunkHolders.size());
++ this.chunkHolders.forEachValue(ret::add);
++ return ret;
++ }
++
++ public int size() {
++ return this.chunkHolders.size();
++ }
++
++ public void close(final boolean save, final boolean halt) {
++ TickThread.ensureTickThread("Closing world off-main");
++ if (halt) {
++ LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'");
++ if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) {
++ LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'");
++ } else {
++ LOGGER.info("Halted chunk system for world '" + this.world.getWorld().getName() + "'");
++ }
++ }
++
++ if (save) {
++ this.saveAllChunks(true, true, true);
++ }
++
++ if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) {
++ RegionFileIOThread.flush();
++ }
++
++ // kill regionfile cache
++ try {
++ this.world.chunkDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close chunk regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ try {
++ this.world.entityDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close entity regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ try {
++ this.world.poiDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ }
++
++ void ensureInAutosave(final NewChunkHolder holder) {
++ if (!this.autoSaveQueue.contains(holder)) {
++ holder.lastAutoSave = MinecraftServer.currentTick;
++ this.autoSaveQueue.add(holder);
++ }
++ }
++
++ public void autoSave() {
++ final List<NewChunkHolder> reschedule = new ArrayList<>();
++ final long currentTick = MinecraftServer.currentTickLong;
++ final long maxSaveTime = currentTick - this.world.paperConfig().chunks.autoSaveInterval.value();
++ for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !this.autoSaveQueue.isEmpty();) {
++ final NewChunkHolder holder = this.autoSaveQueue.first();
++
++ if (holder.lastAutoSave > maxSaveTime) {
++ break;
++ }
++
++ this.autoSaveQueue.remove(holder);
++
++ holder.lastAutoSave = currentTick;
++ if (holder.save(false, false) != null) {
++ ++autoSaved;
++ }
++
++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ reschedule.add(holder);
++ }
++ }
++
++ for (final NewChunkHolder holder : reschedule) {
++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ this.autoSaveQueue.add(holder);
++ }
++ }
++ }
++
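++ // Saves every chunk holder. When flush is set, pending region I/O is partially flushed every 50 saved chunks
++ // and fully flushed at the end; when logProgress is set, progress is logged at most every 10 seconds.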
++ public void saveAllChunks(final boolean flush, final boolean shutdown, final boolean logProgress) {
++ final List<NewChunkHolder> holders = this.getChunkHolders();
++
++ if (logProgress) {
++ LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'");
++ }
++
++ final DecimalFormat format = new DecimalFormat("#0.00");
++
++ int saved = 0;
++
++ long start = System.nanoTime();
++ long lastLog = start;
++ boolean needsFlush = false;
++ final int flushInterval = 50;
++
++ int savedChunk = 0;
++ int savedEntity = 0;
++ int savedPoi = 0;
++
++ for (int i = 0, len = holders.size(); i < len; ++i) {
++ final NewChunkHolder holder = holders.get(i);
++ try {
++ final NewChunkHolder.SaveStat saveStat = holder.save(shutdown, false);
++ if (saveStat != null) {
++ ++saved;
++ needsFlush = flush;
++ if (saveStat.savedChunk()) {
++ ++savedChunk;
++ }
++ if (saveStat.savedEntityChunk()) {
++ ++savedEntity;
++ }
++ if (saveStat.savedPoiChunk()) {
++ ++savedPoi;
++ }
++ }
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save chunk (" + holder.chunkX + "," + holder.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
++ }
++ if (needsFlush && (saved % flushInterval) == 0) {
++ needsFlush = false;
++ RegionFileIOThread.partialFlush(flushInterval / 2);
++ }
++ if (logProgress) {
++ final long currTime = System.nanoTime();
++ if ((currTime - lastLog) > TimeUnit.SECONDS.toNanos(10L)) {
++ lastLog = currTime;
++ LOGGER.info("Saved " + saved + " chunks (" + format.format((double)(i+1)/(double)len * 100.0) + "%) in world '" + this.world.getWorld().getName() + "'");
++ }
++ }
++ }
++ if (flush) {
++ RegionFileIOThread.flush();
++ if (this.world.paperConfig().chunks.flushRegionsOnSave) {
++ try {
++ this.world.chunkSource.chunkMap.regionFileCache.flush();
++ } catch (IOException ex) {
++ LOGGER.error("Exception when flushing regions in world {}", this.world.getWorld().getName(), ex);
++ }
++ }
++ }
++ if (logProgress) {
++ LOGGER.info("Saved " + savedChunk + " block chunks, " + savedEntity + " entity chunks, " + savedPoi + " poi chunks in world '" + this.world.getWorld().getName() + "' in " + format.format(1.0E-9 * (System.nanoTime() - start)) + "s");
++ }
++ }
++
++ protected final Long2IntLinkedOpenHashMap ticketLevelUpdates = new Long2IntLinkedOpenHashMap() {
++ @Override
++ protected void rehash(final int newN) {
++ // no downsizing allowed
++ if (newN < this.n) {
++ return;
++ }
++ super.rehash(newN);
++ }
++ };
++
++ protected final Delayed8WayDistancePropagator2D ticketLevelPropagator = new Delayed8WayDistancePropagator2D(
++ (final long coordinate, final byte oldLevel, final byte newLevel) -> {
++ ChunkHolderManager.this.ticketLevelUpdates.putAndMoveToLast(coordinate, convertBetweenTicketLevels(newLevel));
++ }
++ );
++ // function for converting between ticket levels and propagator levels and vice versa
++ // the problem is the ticket level propagator will propagate from a set source down to zero, whereas mojang expects
++ // levels to propagate from a set value up to a maximum value. so we need to convert the levels we put into the propagator
++ // and the levels we get out of the propagator
++
++ public static int convertBetweenTicketLevels(final int level) {
++ return ChunkMap.MAX_CHUNK_DISTANCE - level + 1;
++ }
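++ // The conversion is its own inverse: convertBetweenTicketLevels(convertBetweenTicketLevels(x)) == x; for
++ // example, MAX_CHUNK_DISTANCE maps to 1 and 1 maps back to MAX_CHUNK_DISTANCE.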
++
++ public boolean hasTickets() {
++ this.ticketLock.lock();
++ try {
++ return !this.tickets.isEmpty();
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public String getTicketDebugString(final long coordinate) {
++ this.ticketLock.lock();
++ try {
++ final SortedArraySet<Ticket<?>> tickets = this.tickets.get(coordinate);
++
++ return tickets != null ? tickets.first().toString() : "no_ticket";
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> getTicketsCopy() {
++ this.ticketLock.lock();
++ try {
++ return this.tickets.clone();
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public Collection<Plugin> getPluginChunkTickets(int x, int z) {
++ ImmutableList.Builder<Plugin> ret;
++ this.ticketLock.lock();
++ try {
++ SortedArraySet<Ticket<?>> tickets = this.tickets.get(ChunkPos.asLong(x, z));
++
++ if (tickets == null) {
++ return Collections.emptyList();
++ }
++
++ ret = ImmutableList.builder();
++ for (Ticket<?> ticket : tickets) {
++ if (ticket.getType() == TicketType.PLUGIN_TICKET) {
++ ret.add((Plugin)ticket.key);
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ return ret.build();
++ }
++
++ protected final int getPropagatedTicketLevel(final long coordinate) {
++ return convertBetweenTicketLevels(this.ticketLevelPropagator.getLevel(coordinate));
++ }
++
++ protected final void updateTicketLevel(final long coordinate, final int ticketLevel) {
++ if (ticketLevel > ChunkMap.MAX_CHUNK_DISTANCE) {
++ this.ticketLevelPropagator.removeSource(coordinate);
++ } else {
++ this.ticketLevelPropagator.setSource(coordinate, convertBetweenTicketLevels(ticketLevel));
++ }
++ }
++
++ private static int getTicketLevelAt(SortedArraySet<Ticket<?>> tickets) {
++ return !tickets.isEmpty() ? tickets.first().getTicketLevel() : MAX_TICKET_LEVEL + 1;
++ }
++
++ public <T> boolean addTicketAtLevel(final TicketType<T> type, final ChunkPos chunkPos, final int level,
++ final T identifier) {
++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
++ }
++
++ public <T> boolean addTicketAtLevel(final TicketType<T> type, final int chunkX, final int chunkZ, final int level,
++ final T identifier) {
++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
++ }
++
++ // supposed to return true if the ticket was added and did not replace another
++ // but, we always return false if the ticket cannot be added
++ public <T> boolean addTicketAtLevel(final TicketType<T> type, final long chunk, final int level, final T identifier) {
++ final long removeDelay = Math.max(0, type.timeout);
++ if (level > MAX_TICKET_LEVEL) {
++ return false;
++ }
++
++ this.ticketLock.lock();
++ try {
++ final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : this.currentTick + removeDelay;
++ final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
++
++ final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.computeIfAbsent(chunk, (final long keyInMap) -> {
++ return SortedArraySet.create(4);
++ });
++
++ final int levelBefore = getTicketLevelAt(ticketsAtChunk);
++ final Ticket<T> current = (Ticket<T>)ticketsAtChunk.replace(ticket);
++ final int levelAfter = getTicketLevelAt(ticketsAtChunk);
++
++ if (current != ticket) {
++ final long oldRemovalTick = current.removalTick;
++ if (removeTick != oldRemovalTick) {
++ if (oldRemovalTick != NO_TIMEOUT_MARKER) {
++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(oldRemovalTick);
++ final int prevCount = removeCounts.addTo(chunk, -1);
++
++ if (prevCount == 1) {
++ removeCounts.remove(chunk);
++ if (removeCounts.isEmpty()) {
++ this.removeTickToChunkExpireTicketCount.remove(oldRemovalTick);
++ }
++ }
++ }
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).addTo(chunk, 1);
++ }
++ }
++ } else {
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).addTo(chunk, 1);
++ }
++ }
++
++ if (levelBefore != levelAfter) {
++ this.updateTicketLevel(chunk, levelAfter);
++ }
++
++ return current == ticket;
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public <T> boolean removeTicketAtLevel(final TicketType<T> type, final ChunkPos chunkPos, final int level, final T identifier) {
++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
++ }
++
++ public <T> boolean removeTicketAtLevel(final TicketType<T> type, final int chunkX, final int chunkZ, final int level, final T identifier) {
++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
++ }
++
++ public <T> boolean removeTicketAtLevel(final TicketType<T> type, final long chunk, final int level, final T identifier) {
++ if (level > MAX_TICKET_LEVEL) {
++ return false;
++ }
++
++ this.ticketLock.lock();
++ try {
++ final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.get(chunk);
++ if (ticketsAtChunk == null) {
++ return false;
++ }
++
++ final int oldLevel = getTicketLevelAt(ticketsAtChunk);
++ final Ticket<T> ticket = (Ticket<T>)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, -2L));
++
++ if (ticket == null) {
++ return false;
++ }
++
++ if (ticketsAtChunk.isEmpty()) {
++ this.tickets.remove(chunk);
++ }
++
++ final int newLevel = getTicketLevelAt(ticketsAtChunk);
++
++ final long removeTick = ticket.removalTick;
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(removeTick);
++ final int currCount = removeCounts.addTo(chunk, -1);
++
++ if (currCount == 1) {
++ removeCounts.remove(chunk);
++ if (removeCounts.isEmpty()) {
++ this.removeTickToChunkExpireTicketCount.remove(removeTick);
++ }
++ }
++ }
++
++ if (oldLevel != newLevel) {
++ this.updateTicketLevel(chunk, newLevel);
++ }
++
++ return true;
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ // atomic with respect to all add/remove/addandremove ticket calls for the given chunk
++ public <T, V> void addAndRemoveTickets(final long chunk, final TicketType<T> addType, final int addLevel, final T addIdentifier,
++ final TicketType<V> removeType, final int removeLevel, final V removeIdentifier) {
++ this.ticketLock.lock();
++ try {
++ this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier);
++ this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier);
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public <T> void removeAllTicketsFor(final TicketType<T> ticketType, final int ticketLevel, final T ticketIdentifier) {
++ if (ticketLevel > MAX_TICKET_LEVEL) {
++ return;
++ }
++
++ this.ticketLock.lock();
++ try {
++ for (final LongIterator iterator = new LongArrayList(this.tickets.keySet()).longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
++
++ this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier);
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
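++ // Expires tickets whose removal tick equals the new current tick, then processes the resulting ticket level
++ // updates outside of the ticket lock.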
++ public void tick() {
++ TickThread.ensureTickThread("Cannot tick ticket manager off-main");
++
++ this.ticketLock.lock();
++ try {
++ final long tick = ++this.currentTick;
++
++ final Long2IntOpenHashMap toRemove = this.removeTickToChunkExpireTicketCount.remove(tick);
++
++ if (toRemove == null) {
++ return;
++ }
++
++ final Predicate<Ticket<?>> expireNow = (final Ticket<?> ticket) -> {
++ return ticket.removalTick == tick;
++ };
++
++ for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
++
++ final SortedArraySet<Ticket<?>> tickets = this.tickets.get(chunk);
++ tickets.removeIf(expireNow);
++ if (tickets.isEmpty()) {
++ this.tickets.remove(chunk);
++ this.ticketLevelPropagator.removeSource(chunk);
++ } else {
++ this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel()));
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ this.processTicketUpdates();
++ }
++
++ public NewChunkHolder getChunkHolder(final int chunkX, final int chunkZ) {
++ return this.chunkHolders.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ public NewChunkHolder getChunkHolder(final long position) {
++ return this.chunkHolders.get(position);
++ }
++
++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.raisePriority(priority);
++ }
++ }
++
++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.setPriority(priority);
++ }
++ }
++
++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.lowerPriority(priority);
++ }
++ }
++
++ private NewChunkHolder createChunkHolder(final long position) {
++ final NewChunkHolder ret = new NewChunkHolder(this.world, CoordinateUtils.getChunkX(position), CoordinateUtils.getChunkZ(position), this.taskScheduler);
++
++ ChunkSystem.onChunkHolderCreate(this.world, ret.vanillaChunkHolder);
++ ret.vanillaChunkHolder.onChunkAdd();
++
++ return ret;
++ }
++
++ // because this function creates the chunk holder without a ticket, it is the caller's responsibility to ensure
++ // the chunk holder eventually unloads. this should only be used to avoid using processTicketUpdates to create chunkholders,
++ // as processTicketUpdates may call plugin logic; in every other case a ticket is appropriate
++ private NewChunkHolder getOrCreateChunkHolder(final int chunkX, final int chunkZ) {
++ return this.getOrCreateChunkHolder(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ private NewChunkHolder getOrCreateChunkHolder(final long position) {
++ if (!this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold ticket level update lock!");
++ }
++ if (!this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold scheduler lock!!");
++ }
++
++ // we could just acquire these locks, but...
++ // must own the locks because the caller needs to ensure that no unload can occur AFTER this function returns
++
++ NewChunkHolder current = this.chunkHolders.get(position);
++ if (current != null) {
++ return current;
++ }
++
++ current = this.createChunkHolder(position);
++ this.chunkHolders.put(position, current);
++
++ return current;
++ }
++
++ private long entityLoadCounter;
++
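++ // Returns the entity slices for the chunk, reading the entity NBT off-main if required while the calling tick
++ // thread parks until completion. A temporary ENTITY_LOAD ticket keeps the holder alive during the load and is
++ // swapped for an UNKNOWN ticket once the entity chunk has been created.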
++ public ChunkEntitySlices getOrCreateEntityChunk(final int chunkX, final int chunkZ, final boolean transientChunk) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create entity chunk off-main");
++ ChunkEntitySlices ret;
++
++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
++ if (current != null && (ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
++ return ret;
++ }
++
++ final AtomicBoolean isCompleted = new AtomicBoolean();
++ final Thread waiter = Thread.currentThread();
++ final Long entityLoadId;
++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
++ this.ticketLock.lock();
++ try {
++ entityLoadId = Long.valueOf(this.entityLoadCounter++);
++ this.addTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ current = this.getOrCreateChunkHolder(chunkX, chunkZ);
++ if ((ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
++ this.removeTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
++ return ret;
++ }
++
++ if (current.isEntityChunkNBTLoaded()) {
++ isCompleted.setPlain(true);
++ } else {
++ loadTask = current.getOrLoadEntityData((final GenericDataLoadTask.TaskResult<CompoundTag, Throwable> result) -> {
++ if (!transientChunk) {
++ isCompleted.set(true);
++ LockSupport.unpark(waiter);
++ }
++ });
++ final ChunkLoadTask.EntityDataLoadTask entityLoad = current.getEntityDataLoadTask();
++
++ if (entityLoad != null && !transientChunk) {
++ entityLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
++ }
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (loadTask != null) {
++ loadTask.schedule();
++ }
++
++ if (!transientChunk) {
++ // Note: no need to busy wait on the chunk queue, entity load will complete off-main
++ boolean interrupted = false;
++ while (!isCompleted.get()) {
++ interrupted |= Thread.interrupted();
++ LockSupport.park();
++ }
++
++ if (interrupted) {
++ Thread.currentThread().interrupt();
++ }
++ }
++
++ // now that the entity data is loaded, we can load it into the world
++
++ ret = current.loadInEntityChunk(transientChunk);
++
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ this.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
++ TicketType.ENTITY_LOAD, MAX_TICKET_LEVEL, entityLoadId
++ );
++
++ return ret;
++ }
++
++ public PoiChunk getPoiChunkIfLoaded(final int chunkX, final int chunkZ, final boolean checkLoadInCallback) {
++ final NewChunkHolder holder = this.getChunkHolder(chunkX, chunkZ);
++ if (holder != null) {
++ final PoiChunk ret = holder.getPoiChunk();
++ return ret == null || (checkLoadInCallback && !ret.isLoaded()) ? null : ret;
++ }
++ return null;
++ }
++
++ private long poiLoadCounter;
++
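++ // Synchronously loads the poi chunk on the tick thread: a POI_LOAD ticket keeps the holder alive while the data
++ // load runs off-main at BLOCKING priority, and the calling thread parks until the load callback completes.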
++ public PoiChunk loadPoiChunk(final int chunkX, final int chunkZ) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create poi chunk off-main");
++ PoiChunk ret;
++
++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
++ if (current != null && (ret = current.getPoiChunk()) != null) {
++ if (!ret.isLoaded()) {
++ ret.load();
++ }
++ return ret;
++ }
++
++ final AtomicReference<PoiChunk> completed = new AtomicReference<>();
++ final AtomicBoolean isCompleted = new AtomicBoolean();
++ final Thread waiter = Thread.currentThread();
++ final Long poiLoadId;
++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
++ this.ticketLock.lock();
++ try {
++ poiLoadId = Long.valueOf(this.poiLoadCounter++);
++ this.addTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ current = this.getOrCreateChunkHolder(chunkX, chunkZ);
++ if (current.isPoiChunkLoaded()) {
++ this.removeTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
++ return current.getPoiChunk();
++ }
++
++ loadTask = current.getOrLoadPoiData((final GenericDataLoadTask.TaskResult<PoiChunk, Throwable> result) -> {
++ completed.setPlain(result.left());
++ isCompleted.set(true);
++ LockSupport.unpark(waiter);
++ });
++ final ChunkLoadTask.PoiDataLoadTask poiLoad = current.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (loadTask != null) {
++ loadTask.schedule();
++ }
++
++ // Note: no need to busy wait on the chunk queue, poi load will complete off-main
++
++ boolean interrupted = false;
++ while (!isCompleted.get()) {
++ interrupted |= Thread.interrupted();
++ LockSupport.park();
++ }
++
++ if (interrupted) {
++ Thread.currentThread().interrupt();
++ }
++
++ ret = completed.getPlain();
++
++ ret.load();
++
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ this.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
++ TicketType.POI_LOAD, MAX_TICKET_LEVEL, poiLoadId
++ );
++
++ return ret;
++ }
++
++ void addChangedStatuses(final List<NewChunkHolder> changedFullStatus) {
++ if (changedFullStatus.isEmpty()) {
++ return;
++ }
++ if (!TickThread.isTickThread()) {
++ this.taskScheduler.scheduleChunkTask(() -> {
++ final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate;
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++
++ ChunkHolderManager.this.processPendingFullUpdate();
++ }, PrioritisedExecutor.Priority.HIGHEST);
++ } else {
++ final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++ }
++ }
++
++ final ReferenceLinkedOpenHashSet<NewChunkHolder> unloadQueue = new ReferenceLinkedOpenHashSet<>();
++
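++ // note: the call sites in processUnloads below invoke this while holding both the ticket lock and the scheduling lock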
++ private void removeChunkHolder(final NewChunkHolder holder) {
++ holder.killed = true;
++ holder.vanillaChunkHolder.onChunkRemove();
++ this.autoSaveQueue.remove(holder);
++ ChunkSystem.onChunkHolderDelete(this.world, holder.vanillaChunkHolder);
++ this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ));
++ }
++
++ // note: never call while inside the chunk system, this will absolutely break everything
++ public void processUnloads() {
++ TickThread.ensureTickThread("Cannot unload chunks off-main");
++
++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
++ throw new IllegalStateException("Cannot unload chunks recursively");
++ }
++ if (this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot hold ticket update lock while calling processUnloads");
++ }
++ if (this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads");
++ }
++
++ final List<NewChunkHolder.UnloadState> unloadQueue;
++ final List<ChunkProgressionTask> scheduleList = new ArrayList<>();
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ if (this.unloadQueue.isEmpty()) {
++ return;
++ }
++ // in order to ensure all chunks in the unload queue do not have a pending ticket level update,
++ // process them now
++ this.processTicketUpdates(false, false, scheduleList);
++ unloadQueue = new ArrayList<>((int)(this.unloadQueue.size() * 0.05) + 1);
++
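++ // cap the work done per invocation: unload at most 5% of the queue (but always at least 50), so unload work
++ // is spread across calls instead of stalling a single tick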
++ final int unloadCount = Math.max(50, (int)(this.unloadQueue.size() * 0.05));
++ for (int i = 0; i < unloadCount && !this.unloadQueue.isEmpty(); ++i) {
++ final NewChunkHolder chunkHolder = this.unloadQueue.removeFirst();
++ if (chunkHolder.isSafeToUnload() != null) {
++ LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?");
++ continue;
++ }
++ final NewChunkHolder.UnloadState state = chunkHolder.unloadStage1();
++ if (state == null) {
++ // can unload immediately
++ this.removeChunkHolder(chunkHolder);
++ continue;
++ }
++ unloadQueue.add(state);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
++ for (int i = 0, len = scheduleList.size(); i < len; ++i) {
++ scheduleList.get(i).schedule();
++ }
++
++ final List<NewChunkHolder> toRemove = new ArrayList<>(unloadQueue.size());
++
++ final Boolean before = this.blockTicketUpdates();
++ try {
++ for (int i = 0, len = unloadQueue.size(); i < len; ++i) {
++ final NewChunkHolder.UnloadState state = unloadQueue.get(i);
++ final NewChunkHolder holder = state.holder();
++
++ holder.unloadStage2(state);
++ toRemove.add(holder);
++ }
++ } finally {
++ this.unblockTicketUpdates(before);
++ }
++
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ for (int i = 0, len = toRemove.size(); i < len; ++i) {
++ final NewChunkHolder holder = toRemove.get(i);
++
++ if (holder.unloadStage3()) {
++ this.removeChunkHolder(holder);
++ } else {
++ // add cooldown so the next unload check is not immediately next tick
++ this.addTicketAtLevel(TicketType.UNLOAD_COOLDOWN, holder.chunkX, holder.chunkZ, MAX_TICKET_LEVEL, Unit.INSTANCE);
++ }
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ private final ThreadLocal<Boolean> BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> {
++ return Boolean.FALSE;
++ });
++
++ public Boolean blockTicketUpdates() {
++ final Boolean ret = BLOCK_TICKET_UPDATES.get();
++ BLOCK_TICKET_UPDATES.set(Boolean.TRUE);
++ return ret;
++ }
++
++ public void unblockTicketUpdates(final Boolean before) {
++ BLOCK_TICKET_UPDATES.set(before);
++ }
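++ // intended pairing, as used in processUnloads above:
++ // final Boolean before = this.blockTicketUpdates();
++ // try { /* work that must not trigger ticket updates */ } finally { this.unblockTicketUpdates(before); }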
++
++ public boolean processTicketUpdates() {
++ return this.processTicketUpdates(true, true, null);
++ }
++
++ private static final ThreadLocal<List<ChunkProgressionTask>> CURRENT_TICKET_UPDATE_SCHEDULING = new ThreadLocal<>();
++
++ static List<ChunkProgressionTask> getCurrentTicketUpdateScheduling() {
++ return CURRENT_TICKET_UPDATE_SCHEDULING.get();
++ }
++
++ private boolean processTicketUpdates(final boolean checkLocks, final boolean processFullUpdates, List<ChunkProgressionTask> scheduledTasks) {
++ TickThread.ensureTickThread("Cannot process ticket levels off-main");
++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
++ throw new IllegalStateException("Cannot update ticket level while unloading chunks or updating entity manager");
++ }
++ if (checkLocks && this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Illegal recursive processTicketUpdates!");
++ }
++ if (checkLocks && this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot update ticket levels from a scheduler context!");
++ }
++
++ List<NewChunkHolder> changedFullStatus = null;
++
++ final boolean isTickThread = TickThread.isTickThread();
++
++ boolean ret = false;
++ final boolean canProcessFullUpdates = processFullUpdates & isTickThread;
++ final boolean canProcessScheduling = scheduledTasks == null;
++
++ this.ticketLock.lock();
++ try {
++ final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates();
++ if (levelsUpdated) {
++ // Unlike CB, ticket level updates cannot happen recursively. Thank god.
++ if (!this.ticketLevelUpdates.isEmpty()) {
++ ret = true;
++
++ // first the necessary chunkholders must be created, so just update the ticket levels
++ for (final Iterator<Long2IntMap.Entry> iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
++ final Long2IntMap.Entry entry = iterator.next();
++ final long key = entry.getLongKey();
++ final int newLevel = entry.getIntValue();
++
++ NewChunkHolder current = this.chunkHolders.get(key);
++ if (current == null && newLevel > MAX_TICKET_LEVEL) {
++ // not loaded and it shouldn't be loaded!
++ iterator.remove();
++ continue;
++ }
++
++ final int currentLevel = current == null ? MAX_TICKET_LEVEL + 1 : current.getCurrentTicketLevel();
++ if (currentLevel == newLevel) {
++ // nothing to do
++ iterator.remove();
++ continue;
++ }
++
++ if (current == null) {
++ // must create
++ current = this.createChunkHolder(key);
++ this.chunkHolders.put(key, current);
++ current.updateTicketLevel(newLevel);
++ } else {
++ current.updateTicketLevel(newLevel);
++ }
++ }
++
++ if (scheduledTasks == null) {
++ scheduledTasks = new ArrayList<>();
++ }
++ changedFullStatus = new ArrayList<>();
++
++ // allow the chunkholders to process ticket level updates without needing to acquire the schedule lock every time
++ final List<ChunkProgressionTask> prev = CURRENT_TICKET_UPDATE_SCHEDULING.get();
++ CURRENT_TICKET_UPDATE_SCHEDULING.set(scheduledTasks);
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ for (final Iterator<Long2IntMap.Entry> iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
++ final Long2IntMap.Entry entry = iterator.next();
++ final long key = entry.getLongKey();
++ final NewChunkHolder current = this.chunkHolders.get(key);
++
++ if (current == null) {
++ throw new IllegalStateException("Expected chunk holder to be created");
++ }
++
++ current.processTicketLevelUpdate(scheduledTasks, changedFullStatus);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ CURRENT_TICKET_UPDATE_SCHEDULING.set(prev);
++ }
++
++ this.ticketLevelUpdates.clear();
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (changedFullStatus != null) {
++ this.addChangedStatuses(changedFullStatus);
++ }
++
++ if (canProcessScheduling && scheduledTasks != null) {
++ for (int i = 0, len = scheduledTasks.size(); i < len; ++i) {
++ scheduledTasks.get(i).schedule();
++ }
++ }
++
++ if (canProcessFullUpdates) {
++ ret |= this.processPendingFullUpdate();
++ }
++
++ return ret;
++ }
++
++ // only call on tick thread
++ protected final boolean processPendingFullUpdate() {
++ final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
++
++ boolean ret = false;
++
++ List<NewChunkHolder> changedFullStatus = new ArrayList<>();
++
++ NewChunkHolder holder;
++ while ((holder = pendingFullLoadUpdate.poll()) != null) {
++ ret |= holder.handleFullStatusChange(changedFullStatus);
++
++ if (!changedFullStatus.isEmpty()) {
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++ changedFullStatus.clear();
++ }
++ }
++
++ return ret;
++ }
++
++ public JsonObject getDebugJsonForWatchdog() {
++ // try and detect any potential deadlock that would require us to read unlocked
++ try {
++ if (this.ticketLock.tryLock(10, TimeUnit.SECONDS)) {
++ try {
++ if (this.taskScheduler.schedulingLock.tryLock(10, TimeUnit.SECONDS)) {
++ try {
++ return this.getDebugJsonNoLock();
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++ } catch (final InterruptedException ignore) {}
++
++ LOGGER.error("Failed to acquire ticket and scheduling lock before timeout for world " + this.world.getWorld().getName());
++
++ // because we read without locks, it may throw exceptions for fastutil maps
++ // so just try until it works...
++ Throwable lastException = null;
++ for (int count = 0;count < 1000;++count) {
++ try {
++ return this.getDebugJsonNoLock();
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ lastException = thr;
++ Thread.yield();
++ LockSupport.parkNanos(10_000L);
++ }
++ }
++
++ // failed, return
++ LOGGER.error("Failed to retrieve debug json for watchdog thread without locking", lastException);
++ return null;
++ }
++
++ private JsonObject getDebugJsonNoLock() {
++ final JsonObject ret = new JsonObject();
++ ret.addProperty("current_tick", Long.valueOf(this.currentTick));
++
++ final JsonArray unloadQueue = new JsonArray();
++ ret.add("unload_queue", unloadQueue);
++ for (final NewChunkHolder holder : this.unloadQueue) {
++ final JsonObject coordinate = new JsonObject();
++ unloadQueue.add(coordinate);
++
++ coordinate.addProperty("chunkX", Integer.valueOf(holder.chunkX));
++ coordinate.addProperty("chunkZ", Integer.valueOf(holder.chunkZ));
++ }
++
++ final JsonArray holders = new JsonArray();
++ ret.add("chunkholders", holders);
++
++ for (final NewChunkHolder holder : this.getChunkHolders()) {
++ holders.add(holder.getDebugJson());
++ }
++
++ final JsonArray removeTickToChunkExpireTicketCount = new JsonArray();
++ ret.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount);
++
++ for (final Long2ObjectMap.Entry<Long2IntOpenHashMap> tickEntry : this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) {
++ final long tick = tickEntry.getLongKey();
++ final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue();
++
++ final JsonObject tickJson = new JsonObject();
++ removeTickToChunkExpireTicketCount.add(tickJson);
++
++ tickJson.addProperty("tick", Long.valueOf(tick));
++
++ final JsonArray tickEntries = new JsonArray();
++ tickJson.add("entries", tickEntries);
++
++ for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) {
++ final long coordinate = entry.getLongKey();
++ final int count = entry.getIntValue();
++
++ final JsonObject entryJson = new JsonObject();
++ tickEntries.add(entryJson);
++
++ entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++ entryJson.addProperty("count", Integer.valueOf(count));
++ }
++ }
++
++ final JsonArray allTicketsJson = new JsonArray();
++ ret.add("tickets", allTicketsJson);
++
++ for (final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> coordinateTickets : this.tickets.long2ObjectEntrySet()) {
++ final long coordinate = coordinateTickets.getLongKey();
++ final SortedArraySet<Ticket<?>> tickets = coordinateTickets.getValue();
++
++ final JsonObject coordinateJson = new JsonObject();
++ allTicketsJson.add(coordinateJson);
++
++ coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++
++ final JsonArray ticketsSerialized = new JsonArray();
++ coordinateJson.add("tickets", ticketsSerialized);
++
++ for (final Ticket<?> ticket : tickets) {
++ final JsonObject ticketSerialized = new JsonObject();
++ ticketsSerialized.add(ticketSerialized);
++
++ ticketSerialized.addProperty("type", ticket.getType().toString());
++ ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel()));
++ ticketSerialized.addProperty("identifier", Objects.toString(ticket.key));
++ ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick));
++ }
++ }
++
++ return ret;
++ }
++
++ public JsonObject getDebugJson() {
++ final List<ChunkProgressionTask> scheduleList = new ArrayList<>();
++ try {
++ final JsonObject ret;
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ this.processTicketUpdates(false, false, scheduleList);
++ ret = this.getDebugJsonNoLock();
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ return ret;
++ } finally {
++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
++ for (int i = 0, len = scheduleList.size(); i < len; ++i) {
++ scheduleList.get(i).schedule();
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..53ddd7e9ac05e6a9eb809f329796e6d4f6bb2ab1
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
+@@ -0,0 +1,181 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.starlight.common.light.StarLightEngine;
++import ca.spottedleaf.starlight.common.light.StarLightInterface;
++import io.papermc.paper.chunk.system.light.LightQueue;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.apache.logging.log4j.LogManager;
++import org.apache.logging.log4j.Logger;
++import java.util.function.BooleanSupplier;
++
++public final class ChunkLightTask extends ChunkProgressionTask {
++
++ private static final Logger LOGGER = LogManager.getLogger();
++
++ protected final ChunkAccess fromChunk;
++
++ private final LightTaskPriorityHolder priorityHolder;
++
++ public ChunkLightTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final ChunkAccess chunk, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.priorityHolder = new LightTaskPriorityHolder(priority, this);
++ this.fromChunk = chunk;
++ }
++
++ @Override
++ public boolean isScheduled() {
++ return this.priorityHolder.isScheduled();
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.LIGHT;
++ }
++
++ @Override
++ public void schedule() {
++ this.priorityHolder.schedule();
++ }
++
++ @Override
++ public void cancel() {
++ this.priorityHolder.cancel();
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.priorityHolder.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.raisePriority(priority);
++ }
++
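++ // Bridges this task's priority API to the StarLight light queue: scheduling and priority changes are forwarded
++ // to the queued light task for this chunk's position.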
++ private static final class LightTaskPriorityHolder extends PriorityHolder {
++
++ protected final ChunkLightTask task;
++
++ protected LightTaskPriorityHolder(final PrioritisedExecutor.Priority priority, final ChunkLightTask task) {
++ super(priority);
++ this.task = task;
++ }
++
++ @Override
++ protected void cancelScheduled() {
++ final ChunkLightTask task = this.task;
++ task.complete(null, null);
++ }
++
++ @Override
++ protected PrioritisedExecutor.Priority getScheduledPriority() {
++ final ChunkLightTask task = this.task;
++ return task.world.getChunkSource().getLightEngine().theLightEngine.lightQueue.getPriority(task.chunkX, task.chunkZ);
++ }
++
++ @Override
++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.queueChunkLightTask(new ChunkPos(task.chunkX, task.chunkZ), new LightTask(starLightInterface, task), priority);
++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.lowerPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.raisePriority(task.chunkX, task.chunkZ, priority);
++ }
++ }
++
++ private static final class LightTask implements BooleanSupplier {
++
++ protected final StarLightInterface lightEngine;
++ protected final ChunkLightTask task;
++
++ public LightTask(final StarLightInterface lightEngine, final ChunkLightTask task) {
++ this.lightEngine = lightEngine;
++ this.task = task;
++ }
++
++ @Override
++ public boolean getAsBoolean() {
++ final ChunkLightTask task = this.task;
++ // executed on light thread
++ if (!task.priorityHolder.markExecuting()) {
++ // cancelled
++ return false;
++ }
++
++ try {
++ final Boolean[] emptySections = StarLightEngine.getEmptySectionsForChunk(task.fromChunk);
++
++ if (task.fromChunk.isLightCorrect() && task.fromChunk.getStatus().isOrAfter(ChunkStatus.LIGHT)) {
++ this.lightEngine.forceLoadInChunk(task.fromChunk, emptySections);
++ this.lightEngine.checkChunkEdges(task.chunkX, task.chunkZ);
++ } else {
++ task.fromChunk.setLightCorrect(false);
++ this.lightEngine.lightChunk(task.fromChunk, emptySections);
++ task.fromChunk.setLightCorrect(true);
++ }
++ // we need to advance status
++ if (task.fromChunk instanceof ProtoChunk chunk && chunk.getStatus() == ChunkStatus.LIGHT.getParent()) {
++ chunk.setStatus(ChunkStatus.LIGHT);
++ }
++ } catch (final Throwable thr) {
++ if (!(thr instanceof ThreadDeath)) {
++ LOGGER.fatal("Failed to light chunk " + task.fromChunk.getPos().toString() + " in world '" + this.lightEngine.getWorld().getWorld().getName() + "'", thr);
++ }
++
++ task.complete(null, thr);
++
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++
++ return true;
++ }
++
++ task.complete(task.fromChunk, null);
++ return true;
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..be6f3f6a57668a9bd50d0ea5f2dd2335355b69d6
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+@@ -0,0 +1,499 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.dataconverter.minecraft.MCDataConverter;
++import ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import net.minecraft.SharedConstants;
++import net.minecraft.core.registries.Registries;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import net.minecraft.world.level.chunk.UpgradeData;
++import net.minecraft.world.level.chunk.storage.ChunkSerializer;
++import net.minecraft.world.level.chunk.storage.EntityStorage;
++import net.minecraft.world.level.levelgen.blending.BlendingData;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.function.Consumer;
++
++public final class ChunkLoadTask extends ChunkProgressionTask {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ private final NewChunkHolder chunkHolder;
++ private final ChunkDataLoadTask loadTask;
++
++ private boolean cancelled;
++ private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
++ private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
++
++ protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ this.chunkHolder = chunkHolder;
++ this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority);
++ this.loadTask.addCallback((final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result) -> {
++ ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
++ });
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.EMPTY;
++ }
++
++ private boolean scheduled;
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
++ final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
++
++ final AtomicInteger count = new AtomicInteger();
++ final Consumer<GenericDataLoadTask.TaskResult<?, ?>> scheduleLoadTask = (final GenericDataLoadTask.TaskResult<?, ?> result) -> {
++ if (count.decrementAndGet() == 0) {
++ ChunkLoadTask.this.loadTask.schedule(false);
++ }
++ };
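++ // count tracks the outstanding data loads scheduled below (entity + poi); the last one to complete schedules
++ // the chunk data load via the callback above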
++
++ // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because
++ // they must schedule a task to off main or to on main to complete
++ this.scheduler.schedulingLock.lock();
++ try {
++ if (this.scheduled) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++ this.scheduled = true;
++ if (this.cancelled) {
++ return;
++ }
++ if (!this.chunkHolder.isEntityChunkNBTLoaded()) {
++ entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask);
++ count.setPlain(count.getPlain() + 1);
++ } else {
++ entityLoadTask = null;
++ }
++
++ if (!this.chunkHolder.isPoiChunkLoaded()) {
++ poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask);
++ count.setPlain(count.getPlain() + 1);
++ } else {
++ poiLoadTask = null;
++ }
++
++ this.entityLoadTask = entityLoadTask;
++ this.poiLoadTask = poiLoadTask;
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (entityLoadTask != null) {
++ entityLoadTask.schedule();
++ }
++
++ if (poiLoadTask != null) {
++ poiLoadTask.schedule();
++ }
++
++ if (entityLoadTask == null && poiLoadTask == null) {
++ // no need to wait on those, we can schedule now
++ this.loadTask.schedule(false);
++ }
++ }
++
++ @Override
++ public void cancel() {
++ // must be before load task access, so we can synchronise with the writes to the fields
++ this.scheduler.schedulingLock.lock();
++ try {
++ this.cancelled = true;
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ /*
++ Note: The entityLoadTask/poiLoadTask do not complete when cancelled,
++ but this is fine because if they are successfully cancelled then
++ we will successfully cancel the load task, which will complete when cancelled
++ */
++
++ if (this.entityLoadTask != null) {
++ this.entityLoadTask.cancel();
++ }
++ if (this.poiLoadTask != null) {
++ this.poiLoadTask.cancel();
++ }
++ this.loadTask.cancel();
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.loadTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.lowerPriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.lowerPriority(priority);
++ }
++
++ this.loadTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.setPriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.setPriority(priority);
++ }
++
++ this.loadTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.raisePriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.raisePriority(priority);
++ }
++
++ this.loadTask.raisePriority(priority);
++ }
++
++ protected static abstract class CallbackDataLoadTask<OnMain, FinalCompletion> extends GenericDataLoadTask<OnMain, FinalCompletion> {
++
++ private TaskResult<FinalCompletion, Throwable> result;
++ private final MultiThreadedQueue<Consumer<TaskResult<FinalCompletion, Throwable>>> waiters = new MultiThreadedQueue<>();
++
++ protected volatile boolean completed;
++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(CallbackDataLoadTask.class, "completed", boolean.class);
++
++ protected CallbackDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final RegionFileIOThread.RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, type, priority);
++ }
++
++ public void addCallback(final Consumer<TaskResult<FinalCompletion, Throwable>> consumer) {
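++ // if the add is rejected, onComplete() has already drained the queue and blocked further adds, so run the
++ // callback inline with the stored result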
++ if (!this.waiters.add(consumer)) {
++ try {
++ consumer.accept(this.result);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.result.right())
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ }
++ }
++ }
++
++ @Override
++ protected void onComplete(final TaskResult<FinalCompletion, Throwable> result) {
++ if ((boolean)COMPLETED_HANDLE.getAndSet((CallbackDataLoadTask)this, (boolean)true)) {
++ throw new IllegalStateException("Already completed");
++ }
++ this.result = result;
++ Consumer<TaskResult<FinalCompletion, Throwable>> consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ try {
++ consumer.accept(result);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(result.right())
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++ }
++ }
++ }
++
++ public final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkSerializer.InProgressChunkHolder, ChunkAccess> {
++ protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return true;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority);
++ }
++
++ @Override
++ protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
++ if (data != null) {
++ return null;
++ }
++
++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
++ if (poiChunk == null) {
++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
++ } else if (!poiChunk.isLoaded()) {
++ // need to call poiChunk.load() on main
++ return null;
++ }
++
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++
++ @Override
++ protected TaskResult<ChunkSerializer.InProgressChunkHolder, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable);
++ return new TaskResult<>(null, null);
++ }
++
++ if (data == null) {
++ return new TaskResult<>(null, null);
++ }
++
++ // need to convert data, and then deserialize it
++
++ try {
++ final ChunkPos chunkPos = new ChunkPos(this.chunkX, this.chunkZ);
++ final ChunkMap chunkMap = this.world.getChunkSource().chunkMap;
++ // run converters
++ // note: upgradeChunkTag copies the data already
++ final CompoundTag converted = chunkMap.upgradeChunkTag(
++ this.world.getTypeKey(), chunkMap.overworldDataStorage, data, chunkMap.generator.getTypeNameForDataFixer(),
++ chunkPos, this.world
++ );
++ // deserialize
++ final ChunkSerializer.InProgressChunkHolder chunkHolder = ChunkSerializer.loadChunk(
++ this.world, chunkMap.getPoiManager(), chunkPos, converted, true
++ );
++
++ return new TaskResult<>(chunkHolder, null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2);
++ return new TaskResult<>(null, thr2);
++ }
++ }
++
++ private ProtoChunk getEmptyChunk() {
++ return new ProtoChunk(
++ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
++ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
++ );
++ }
++
++ @Override
++ protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
++ if (poiChunk == null) {
++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
++ } else {
++ poiChunk.load();
++ }
++
++ if (data == null || data.protoChunk == null) {
++ // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care,
++ // it's handled already
++
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++
++ // have tasks to run (at this point, it's just the POI consistency checking)
++ try {
++ if (data.tasks != null) {
++ for (int i = 0, len = data.tasks.size(); i < len; ++i) {
++ data.tasks.poll().run();
++ }
++ }
++
++ return new TaskResult<>(data.protoChunk, null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2);
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++ }
++ }
++
++ public static final class PoiDataLoadTask extends CallbackDataLoadTask<PoiChunk, PoiChunk> {
++ public PoiDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.POI_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return false;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult<PoiChunk, Throwable> completeOnMainOffMain(final PoiChunk data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult<PoiChunk, Throwable> runOffMain(CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load poi data for task: " + this.toString() + ", poi data will be lost", throwable);
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++
++ if (data == null || data.isEmpty()) {
++ // nothing to do
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++
++ try {
++ data = data.copy(); // coming from the I/O thread, so we need to copy
++ // run converters
++ final int dataVersion = !data.contains(SharedConstants.DATA_VERSION_TAG, 99) ? 1945 : data.getInt(SharedConstants.DATA_VERSION_TAG);
++ final CompoundTag converted = MCDataConverter.convertTag(
++ MCTypeRegistry.POI_CHUNK, data, dataVersion, SharedConstants.getCurrentVersion().getDataVersion().getVersion()
++ );
++
++ // now we need to parse it
++ return new TaskResult<>(PoiChunk.parse(this.world, this.chunkX, this.chunkZ, converted), null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to run parse poi data for task: " + this.toString() + ", poi data will be lost", thr2);
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++ }
++
++ @Override
++ protected TaskResult<PoiChunk, Throwable> runOnMain(final PoiChunk data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++ }
++
++ public static final class EntityDataLoadTask extends CallbackDataLoadTask<CompoundTag, CompoundTag> {
++
++ public EntityDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return false;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult<CompoundTag, Throwable> completeOnMainOffMain(final CompoundTag data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult<CompoundTag, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load entity data for task: " + this.toString() + ", entity data will be lost", throwable);
++ return new TaskResult<>(null, null);
++ }
++
++ if (data == null || data.isEmpty()) {
++ // nothing to do
++ return new TaskResult<>(null, null);
++ }
++
++ try {
++ // note: data comes from the I/O thread, so we need to copy it
++ return new TaskResult<>(EntityStorage.upgradeChunkTag(data.copy()), null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to run converters for entity data for task: " + this.toString() + ", entity data will be lost", thr2);
++ return new TaskResult<>(null, thr2);
++ }
++ }
++
++ @Override
++ protected TaskResult<CompoundTag, Throwable> runOnMain(final CompoundTag data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..322675a470eacbf0e5452f4009c643f2d0b4ce24
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
+@@ -0,0 +1,105 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.function.BiConsumer;
++
++public abstract class ChunkProgressionTask {
++
++ private final MultiThreadedQueue<BiConsumer<ChunkAccess, Throwable>> waiters = new MultiThreadedQueue<>();
++ private ChunkAccess completedChunk;
++ private Throwable completedThrowable;
++
++ protected final ChunkTaskScheduler scheduler;
++ protected final ServerLevel world;
++ protected final int chunkX;
++ protected final int chunkZ;
++
++ protected volatile boolean completed;
++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(ChunkProgressionTask.class, "completed", boolean.class);
++
++ protected ChunkProgressionTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ) {
++ this.scheduler = scheduler;
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ }
++
++ // Used only for debug json
++ public abstract boolean isScheduled();
++
++ // Note: It is the responsibility of the task to set the chunk's status once it has completed
++ public abstract ChunkStatus getTargetStatus();
++
++ /* Only executed once */
++ /* Implementations must be prepared to handle cases where cancel() is called before schedule() */
++ public abstract void schedule();
++
++ /* May be called multiple times */
++ public abstract void cancel();
++
++ public abstract PrioritisedExecutor.Priority getPriority();
++
++ /* Schedule lock is always held for the priority update calls */
++
++ public abstract void lowerPriority(final PrioritisedExecutor.Priority priority);
++
++ public abstract void setPriority(final PrioritisedExecutor.Priority priority);
++
++ public abstract void raisePriority(final PrioritisedExecutor.Priority priority);
++
++ public final void onComplete(final BiConsumer<ChunkAccess, Throwable> onComplete) {
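++ // if the add is rejected, complete0() has already run and blocked further adds, so invoke the callback now
++ // with the stored completion values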
++ if (!this.waiters.add(onComplete)) {
++ try {
++ onComplete.accept(this.completedChunk, this.completedThrowable);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(onComplete),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.completedThrowable)
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ }
++ }
++ }
++
++ protected final void complete(final ChunkAccess chunk, final Throwable throwable) {
++ try {
++ this.complete0(chunk, throwable);
++ } catch (final Throwable thr2) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable)
++ ), thr2);
++ if (thr2 instanceof ThreadDeath) {
++ throw (ThreadDeath)thr2;
++ }
++ }
++ }
++
++ private void complete0(final ChunkAccess chunk, final Throwable throwable) {
++ if ((boolean)COMPLETED_HANDLE.getAndSet((ChunkProgressionTask)this, (boolean)true)) {
++ throw new IllegalStateException("Already completed");
++ }
++ this.completedChunk = chunk;
++ this.completedThrowable = throwable;
++
++ BiConsumer<ChunkAccess, Throwable> consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ consumer.accept(chunk, throwable);
++ }
++ }
++
++ @Override
++ public String toString() {
++ return "ChunkProgressionTask{class: " + this.getClass().getName() + ", for world: " + this.world.getWorld().getName() +
++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
++ ", status: " + this.getTargetStatus().toString() + ", scheduled: " + this.isScheduled() + "}";
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..84d6af5c28cd0e81d50701bebe122f462720fbf8
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+@@ -0,0 +1,775 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import net.minecraft.CrashReport;
++import net.minecraft.CrashReportCategory;
++import net.minecraft.ReportedException;
++import io.papermc.paper.util.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.LevelChunk;
++import org.bukkit.Bukkit;
++import org.slf4j.Logger;
++import java.io.File;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.List;
++import java.util.Map;
++import java.util.Objects;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.ReentrantLock;
++import java.util.function.BooleanSupplier;
++import java.util.function.Consumer;
++
++public final class ChunkTaskScheduler {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ static int newChunkSystemIOThreads;
++ static int newChunkSystemWorkerThreads;
++ static int newChunkSystemGenParallelism;
++ static int newChunkSystemLoadParallelism;
++
++ public static ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool workerThreads;
++
++ private static boolean initialised = false;
++
++ public static void init(final GlobalConfiguration.ChunkSystem config) {
++ if (initialised) {
++ return;
++ }
++ initialised = true;
++ newChunkSystemIOThreads = config.ioThreads;
++ newChunkSystemWorkerThreads = config.workerThreads;
++ if (newChunkSystemIOThreads < 0) {
++ newChunkSystemIOThreads = 1;
++ } else {
++ newChunkSystemIOThreads = Math.max(1, newChunkSystemIOThreads);
++ }
++ int defaultWorkerThreads = Runtime.getRuntime().availableProcessors() / 2;
++ if (defaultWorkerThreads <= 4) {
++ defaultWorkerThreads = defaultWorkerThreads <= 3 ? 1 : 2;
++ } else {
++ defaultWorkerThreads = defaultWorkerThreads / 2;
++ }
++ defaultWorkerThreads = Integer.getInteger("Paper.WorkerThreadCount", Integer.valueOf(defaultWorkerThreads));
++
++ if (newChunkSystemWorkerThreads < 0) {
++ newChunkSystemWorkerThreads = defaultWorkerThreads;
++ } else {
++ newChunkSystemWorkerThreads = Math.max(1, newChunkSystemWorkerThreads);
++ }
++
++ String newChunkSystemGenParallelism = config.genParallelism;
++ if (newChunkSystemGenParallelism.equalsIgnoreCase("default")) {
++ newChunkSystemGenParallelism = "true";
++ }
++ boolean useParallelGen;
++ if (newChunkSystemGenParallelism.equalsIgnoreCase("on") || newChunkSystemGenParallelism.equalsIgnoreCase("enabled")
++ || newChunkSystemGenParallelism.equalsIgnoreCase("true")) {
++ useParallelGen = true;
++ } else if (newChunkSystemGenParallelism.equalsIgnoreCase("off") || newChunkSystemGenParallelism.equalsIgnoreCase("disabled")
++ || newChunkSystemGenParallelism.equalsIgnoreCase("false")) {
++ useParallelGen = false;
++ } else {
++ throw new IllegalStateException("Invalid option for gen-parallelism: must be one of [on, off, enabled, disabled, true, false, default]");
++ }
++
++ ChunkTaskScheduler.newChunkSystemGenParallelism = useParallelGen ? newChunkSystemWorkerThreads : 1;
++ ChunkTaskScheduler.newChunkSystemLoadParallelism = newChunkSystemWorkerThreads;
++
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.init(newChunkSystemIOThreads);
++ workerThreads = new ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool(
++ "Paper Chunk System Worker Pool", newChunkSystemWorkerThreads,
++ (final Thread thread, final Integer id) -> {
++ thread.setPriority(Thread.NORM_PRIORITY - 2);
++ thread.setName("Tuinity Chunk System Worker #" + id.intValue());
++ thread.setUncaughtExceptionHandler(io.papermc.paper.chunk.system.scheduling.NewChunkHolder.CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER);
++ }, (long)(20.0e6)); // 20ms
++
++ LOGGER.info("Chunk system is using " + newChunkSystemIOThreads + " I/O threads, " + newChunkSystemWorkerThreads + " worker threads, and gen parallelism of " + ChunkTaskScheduler.newChunkSystemGenParallelism + " threads");
++ }
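++ /*
++ * Illustrative only: the values above are read from the chunk-system section of Paper's global configuration
++ * (field names per GlobalConfiguration.ChunkSystem as used above); the key names below are assumed for the example.
++ * chunk-system:
++ * gen-parallelism: default
++ * io-threads: -1
++ * worker-threads: -1
++ */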
++
++ public final ServerLevel world;
++ public final PrioritisedThreadPool workers;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
++
++ private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue();
++
++ final ReentrantLock schedulingLock = new ReentrantLock();
++ public final ChunkHolderManager chunkHolderManager;
++
++ static {
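++ // writeRadius appears to describe how far (in chunks) a status may write outside the target chunk while
++ // generating, which the scheduler can use to lock the appropriate neighbourhood before running the status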
++ ChunkStatus.EMPTY.writeRadius = 0;
++ ChunkStatus.STRUCTURE_STARTS.writeRadius = 0;
++ ChunkStatus.STRUCTURE_REFERENCES.writeRadius = 0;
++ ChunkStatus.BIOMES.writeRadius = 0;
++ ChunkStatus.NOISE.writeRadius = 0;
++ ChunkStatus.SURFACE.writeRadius = 0;
++ ChunkStatus.CARVERS.writeRadius = 0;
++ ChunkStatus.FEATURES.writeRadius = 1;
++ ChunkStatus.INITIALIZE_LIGHT.writeRadius = 0;
++ ChunkStatus.LIGHT.writeRadius = 2;
++ ChunkStatus.SPAWN.writeRadius = 0;
++ ChunkStatus.FULL.writeRadius = 0;
++
++ /*
++ It's important that the neighbour read radius is taken into account. If _any_ later status is using some chunk as
++ a neighbour, it must also be safe if that neighbour is being generated, i.e. for any status later than FEATURES,
++ for a status to be parallel safe it must not read the block data from its neighbours.
++ */
++ final List<ChunkStatus> parallelCapableStatus = Arrays.asList(
++ // No-op executor.
++ ChunkStatus.EMPTY,
++
++ // This is parallel capable, as CB has fixed the concurrency issue with stronghold generations.
++ // Does not touch neighbour chunks.
++ ChunkStatus.STRUCTURE_STARTS,
++
++ // Surprisingly this is parallel capable. It is simply reading the already-created structure starts
++ // into the structure references for the chunk. So while it reads from its neighbours, its neighbours
++ // will not change, even if executed in parallel.
++ ChunkStatus.STRUCTURE_REFERENCES,
++
++ // Safe. Mojang runs it in parallel as well.
++ ChunkStatus.BIOMES,
++
++ // Safe. Mojang runs it in parallel as well.
++ ChunkStatus.NOISE,
++
++ // Parallel safe. Only touches the target chunk. Biome retrieval is now noise based, which is
++ // completely thread-safe.
++ ChunkStatus.SURFACE,
++
++ // No global state is modified in the carvers. It only touches the specified chunk. So it is parallel safe.
++ ChunkStatus.CARVERS,
++
++ // FEATURES is not parallel safe. It writes to neighbours.
++
++ // no-op executor
++ ChunkStatus.INITIALIZE_LIGHT
++
++ // LIGHT is not parallel safe. It also doesn't run on the generation executor, so no point.
++
++ // Only writes to the specified chunk. State is not read by later statuses. Parallel safe.
++ // Note: it may look unsafe because it writes to a worldgenregion, but the region size is always 0 -
++ // see the task margin.
++ // However, if the neighbouring FEATURES chunk is unloaded, but then fails to load in again (for whatever
++ // reason), then it would write to this chunk - and since this status reads blocks from itself, it's not
++ // safe to execute this in parallel.
++ // SPAWN
++
++ // FULL is executed on main.
++ );
++
++ for (final ChunkStatus status : parallelCapableStatus) {
++ status.isParallelCapable = true;
++ }
++ }
++
++ public ChunkTaskScheduler(final ServerLevel world, final PrioritisedThreadPool workers) {
++ this.world = world;
++ this.workers = workers;
++
++ final String worldName = world.getWorld().getName();
++ this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1);
++ // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks
++ this.lightExecutor = this.genExecutor;
++ this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor
++ : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism);
++ this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism);
++ this.chunkHolderManager = new ChunkHolderManager(world, this);
++ }
++
++ private final AtomicBoolean failedChunkSystem = new AtomicBoolean();
++
++ public static Object stringIfNull(final Object obj) {
++ return obj == null ? "null" : obj;
++ }
++
++ public void unrecoverableChunkSystemFailure(final int chunkX, final int chunkZ, final Map<Object, Object> objectsOfInterest, final Throwable thr) {
++ final NewChunkHolder holder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
++ LOGGER.error("Chunk system error at chunk (" + chunkX + "," + chunkZ + "), holder: " + holder + ", exception:", new Throwable(thr));
++
++ if (this.failedChunkSystem.getAndSet(true)) {
++ return;
++ }
++
++ final ReportedException reportedException = thr instanceof ReportedException ? (ReportedException)thr : new ReportedException(new CrashReport("Chunk system error", thr));
++
++ CrashReportCategory crashReportCategory = reportedException.getReport().addCategory("Chunk system details");
++ crashReportCategory.setDetail("Chunk coordinate", new ChunkPos(chunkX, chunkZ).toString());
++ crashReportCategory.setDetail("ChunkHolder", Objects.toString(holder));
++ crashReportCategory.setDetail("unrecoverableChunkSystemFailure caller thread", Thread.currentThread().getName());
++
++ crashReportCategory = reportedException.getReport().addCategory("Chunk System Objects of Interest");
++ for (final Map.Entry<Object, Object> entry : objectsOfInterest.entrySet()) {
++ if (entry.getValue() instanceof Throwable thrObject) {
++ crashReportCategory.setDetailError(Objects.toString(entry.getKey()), thrObject);
++ } else {
++ crashReportCategory.setDetail(Objects.toString(entry.getKey()), Objects.toString(entry.getValue()));
++ }
++ }
++
++ final Runnable crash = () -> {
++ throw new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
++ };
++
++ // this may not be good enough, specifically thanks to stupid ass plugins swallowing exceptions
++ this.scheduleChunkTask(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING);
++ // so, make the main thread pick it up
++ MinecraftServer.chunkSystemCrash = new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
++ }
++
++ public boolean executeMainThreadTask() {
++ TickThread.ensureTickThread("Cannot execute main thread task off-main");
++ return this.mainThreadExecutor.executeTask();
++ }
++
++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.raisePriority(x, z, priority);
++ }
++
++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.setPriority(x, z, priority);
++ }
++
++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.lowerPriority(x, z, priority);
++ }
++
++ private final AtomicLong chunkLoadCounter = new AtomicLong();
++
++ public void scheduleTickingState(final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus toStatus,
++ final boolean addTicket, final PrioritisedExecutor.Priority priority,
++ final Consumer<LevelChunk> onComplete) {
++ if (!TickThread.isTickThread()) {
++ this.scheduleChunkTask(chunkX, chunkZ, () -> {
++ ChunkTaskScheduler.this.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ }, priority);
++ return;
++ }
++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
++ }
++ if (this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk loading recursively");
++ }
++
++ if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
++ throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status");
++ }
++
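++ // ticket level for a full-status target, derived from the ordinal: BORDER (1) -> 33, TICKING (2) -> 32, ENTITY_TICKING (3) -> 31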
++ final int minLevel = 33 - (toStatus.ordinal() - 1);
++ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++
++ if (addTicket) {
++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
++ this.chunkHolderManager.processTicketUpdates();
++ }
++
++ final Consumer<LevelChunk> loadCallback = (final LevelChunk chunk) -> {
++ try {
++ if (onComplete != null) {
++ onComplete.accept(chunk);
++ }
++ } finally {
++ if (addTicket) {
++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
++ TicketType.CHUNK_LOAD, minLevel, chunkReference
++ );
++ }
++ }
++ };
++
++ final boolean scheduled;
++ final LevelChunk chunk;
++ this.chunkHolderManager.ticketLock.lock();
++ try {
++ this.schedulingLock.lock();
++ try {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
++ scheduled = false;
++ chunk = null;
++ } else {
++ final ChunkHolder.FullChunkStatus currStatus = chunkHolder.getChunkStatus();
++ if (currStatus.isOrAfter(toStatus)) {
++ scheduled = false;
++ chunk = (LevelChunk)chunkHolder.getCurrentChunk();
++ } else {
++ scheduled = true;
++ chunk = null;
++
++ final int radius = toStatus.ordinal() - 1; // 0 -> BORDER, 1 -> TICKING, 2 -> ENTITY_TICKING
++ for (int dz = -radius; dz <= radius; ++dz) {
++ for (int dx = -radius; dx <= radius; ++dx) {
++ final NewChunkHolder neighbour =
++ (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
++ if (neighbour != null) {
++ neighbour.raisePriority(priority);
++ }
++ }
++ }
++
++ // ticket level should schedule for us
++ chunkHolder.addFullStatusConsumer(toStatus, loadCallback);
++ }
++ }
++ } finally {
++ this.schedulingLock.unlock();
++ }
++ } finally {
++ this.chunkHolderManager.ticketLock.unlock();
++ }
++
++ if (!scheduled) {
++ // couldn't schedule
++ try {
++ loadCallback.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk full status callback", thr);
++ }
++ }
++ }
++
++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final boolean gen, final ChunkStatus toStatus, final boolean addTicket,
++ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
++ if (gen) {
++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ return;
++ }
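++ // gen = false: probe the chunk at EMPTY first and only schedule the real load if the data on disk has
++ // already reached the requested status; otherwise complete with null rather than generating anything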
++ this.scheduleChunkLoad(chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> {
++ if (chunk == null) {
++ onComplete.accept(null);
++ } else {
++ if (chunk.getStatus().isOrAfter(toStatus)) {
++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ } else {
++ onComplete.accept(null);
++ }
++ }
++ });
++ }
++
++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final ChunkStatus toStatus, final boolean addTicket,
++ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
++ if (!TickThread.isTickThread()) {
++ this.scheduleChunkTask(chunkX, chunkZ, () -> {
++ ChunkTaskScheduler.this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ }, priority);
++ return;
++ }
++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
++ }
++ if (this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk loading recursively");
++ }
++
++ if (toStatus == ChunkStatus.FULL) {
++ this.scheduleTickingState(chunkX, chunkZ, ChunkHolder.FullChunkStatus.BORDER, addTicket, priority, (Consumer)onComplete);
++ return;
++ }
++
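++ // ticket level for a generation-status target: 33 (FULL) plus the target status' distance from FULL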
++ final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
++ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++
++ if (addTicket) {
++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
++ this.chunkHolderManager.processTicketUpdates();
++ }
++
++ final Consumer<ChunkAccess> loadCallback = (final ChunkAccess chunk) -> {
++ try {
++ if (onComplete != null) {
++ onComplete.accept(chunk);
++ }
++ } finally {
++ if (addTicket) {
++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
++ TicketType.CHUNK_LOAD, minLevel, chunkReference
++ );
++ }
++ }
++ };
++
++ final List<ChunkProgressionTask> tasks = new ArrayList<>();
++
++ final boolean scheduled;
++ final ChunkAccess chunk;
++ this.chunkHolderManager.ticketLock.lock();
++ try {
++ this.schedulingLock.lock();
++ try {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
++ scheduled = false;
++ chunk = null;
++ } else {
++ final ChunkStatus genStatus = chunkHolder.getCurrentGenStatus();
++ if (genStatus != null && genStatus.isOrAfter(toStatus)) {
++ scheduled = false;
++ chunk = chunkHolder.getCurrentChunk();
++ } else {
++ scheduled = true;
++ chunk = null;
++ chunkHolder.raisePriority(priority);
++
++ if (!chunkHolder.upgradeGenTarget(toStatus)) {
++ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks);
++ }
++ chunkHolder.addStatusConsumer(toStatus, loadCallback);
++ }
++ }
++ } finally {
++ this.schedulingLock.unlock();
++ }
++ } finally {
++ this.chunkHolderManager.ticketLock.unlock();
++ }
++
++ for (int i = 0, len = tasks.size(); i < len; ++i) {
++ tasks.get(i).schedule();
++ }
++
++ if (!scheduled) {
++ // couldn't schedule
++ try {
++ loadCallback.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk status callback", thr);
++ }
++ }
++ }
++
++ private ChunkProgressionTask createTask(final int chunkX, final int chunkZ, final ChunkAccess chunk,
++ final NewChunkHolder chunkHolder, final List<ChunkAccess> neighbours,
++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority initialPriority) {
++ if (toStatus == ChunkStatus.EMPTY) {
++ return new ChunkLoadTask(this, this.world, chunkX, chunkZ, chunkHolder, initialPriority);
++ }
++ if (toStatus == ChunkStatus.LIGHT) {
++ return new ChunkLightTask(this, this.world, chunkX, chunkZ, chunk, initialPriority);
++ }
++ if (toStatus == ChunkStatus.FULL) {
++ return new ChunkFullTask(this, this.world, chunkX, chunkZ, chunkHolder, chunk, initialPriority);
++ }
++
++ return new ChunkUpgradeGenericStatusTask(this, this.world, chunkX, chunkZ, chunk, neighbours, toStatus, initialPriority);
++ }
++
++ ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus, final NewChunkHolder chunkHolder,
++ final List<ChunkProgressionTask> allTasks) {
++ return this.schedule(chunkX, chunkZ, targetStatus, chunkHolder, allTasks, chunkHolder.getEffectivePriority());
++ }
++
++ // returns the new task scheduled for the _specified_ chunk
++ // note: this must hold the scheduling lock
++ // minPriority is only used to pass the priority through to neighbours, as priority calculation has not yet been done
++ // schedule will ignore the generation target, so it should be checked by the caller to ensure the target is not regressed!
++ private ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus,
++ final NewChunkHolder chunkHolder, final List<ChunkProgressionTask> allTasks,
++ final PrioritisedExecutor.Priority minPriority) {
++ if (!this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Not holding scheduling lock");
++ }
++
++ if (chunkHolder.hasGenerationTask()) {
++ chunkHolder.upgradeGenTarget(targetStatus);
++ return null;
++ }
++
++ final PrioritisedExecutor.Priority requestedPriority = PrioritisedExecutor.Priority.max(minPriority, chunkHolder.getEffectivePriority());
++ final ChunkStatus currentGenStatus = chunkHolder.getCurrentGenStatus();
++ final ChunkAccess chunk = chunkHolder.getCurrentChunk();
++
++ if (currentGenStatus == null) {
++ // not yet loaded
++ final ChunkProgressionTask task = this.createTask(
++ chunkX, chunkZ, chunk, chunkHolder, Collections.emptyList(), ChunkStatus.EMPTY, requestedPriority
++ );
++
++ allTasks.add(task);
++
++ final List<NewChunkHolder> chunkHolderNeighbours = new ArrayList<>(1);
++ chunkHolderNeighbours.add(chunkHolder);
++
++ chunkHolder.setGenerationTarget(targetStatus);
++ chunkHolder.setGenerationTask(task, ChunkStatus.EMPTY, chunkHolderNeighbours);
++
++ return task;
++ }
++
++ if (currentGenStatus.isOrAfter(targetStatus)) {
++ // nothing to do
++ return null;
++ }
++
++ // we know for sure now that we want to schedule _something_, so set the target
++ chunkHolder.setGenerationTarget(targetStatus);
++
++ final ChunkStatus chunkRealStatus = chunk.getStatus();
++ final ChunkStatus toStatus = currentGenStatus.getNextStatus();
++
++ // if this chunk has already generated up to or past the specified status, then we don't
++ // need the neighbours AT ALL.
++ final int neighbourReadRadius = chunkRealStatus.isOrAfter(toStatus) ? toStatus.loadRange : toStatus.getRange();
++
++ boolean unGeneratedNeighbours = false;
++
++ // copied from MCUtil.getSpiralOutChunks
++ for (int r = 1; r <= neighbourReadRadius; r++) {
++ int x = -r;
++ int z = r;
++
++ // Iterates the edge of half of the box; then negates for other half.
++ while (x <= r && z > -r) {
++ final int radius = Math.max(Math.abs(x), Math.abs(z));
++ final ChunkStatus requiredNeighbourStatus = ChunkMap.getDependencyStatus(toStatus, radius);
++
++ unGeneratedNeighbours |= this.checkNeighbour(
++ chunkX + x, chunkZ + z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
++ );
++ unGeneratedNeighbours |= this.checkNeighbour(
++ chunkX - x, chunkZ - z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
++ );
++
++ if (x < r) {
++ x++;
++ } else {
++ z--;
++ }
++ }
++ }
++
++ if (unGeneratedNeighbours) {
++ // can't schedule, but neighbour completion will schedule for us when they're ALL done
++
++ // propagate our priority to neighbours
++ chunkHolder.recalculateNeighbourPriorities();
++ return null;
++ }
++
++ // need to gather neighbours
++
++ final List<ChunkAccess> neighbours;
++ final List<NewChunkHolder> chunkHolderNeighbours;
++ if (neighbourReadRadius <= 0) {
++ neighbours = new ArrayList<>(1);
++ chunkHolderNeighbours = new ArrayList<>(1);
++ neighbours.add(chunk);
++ chunkHolderNeighbours.add(chunkHolder);
++ } else {
++ // the iteration order is _very_ important, as all generation statuses expect a certain order such that:
++ // chunkAtRelative = neighbours.get(relX + relZ * (2 * radius + 1))
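++ // e.g. for radius 1 the list is filled, in order, with (dx, dz) = (-1,-1), (0,-1), (1,-1), (-1,0), (0,0), (1,0), (-1,1), (0,1), (1,1)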
++ neighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
++ chunkHolderNeighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
++ for (int dz = -neighbourReadRadius; dz <= neighbourReadRadius; ++dz) {
++ for (int dx = -neighbourReadRadius; dx <= neighbourReadRadius; ++dx) {
++ final NewChunkHolder holder = (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
++ neighbours.add(holder.getChunkForNeighbourAccess());
++ chunkHolderNeighbours.add(holder);
++ }
++ }
++ }
++
++ final ChunkProgressionTask task = this.createTask(chunkX, chunkZ, chunk, chunkHolder, neighbours, toStatus, chunkHolder.getEffectivePriority());
++ allTasks.add(task);
++
++ chunkHolder.setGenerationTask(task, toStatus, chunkHolderNeighbours);
++
++ return task;
++ }
++
++ // returns true if the neighbour is not at the required status, false otherwise
++ private boolean checkNeighbour(final int chunkX, final int chunkZ, final ChunkStatus requiredStatus, final NewChunkHolder center,
++ final List<ChunkProgressionTask> tasks, final PrioritisedExecutor.Priority minPriority) {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
++
++ if (chunkHolder == null) {
++ throw new IllegalStateException("Missing chunkholder when required");
++ }
++
++ final ChunkStatus holderStatus = chunkHolder.getCurrentGenStatus();
++ if (holderStatus != null && holderStatus.isOrAfter(requiredStatus)) {
++ return false;
++ }
++
++ if (chunkHolder.hasFailedGeneration()) {
++ return true;
++ }
++
++ center.addGenerationBlockingNeighbour(chunkHolder);
++ chunkHolder.addWaitingNeighbour(center, requiredStatus);
++
++ if (chunkHolder.upgradeGenTarget(requiredStatus)) {
++ return true;
++ }
++
++ // not at status required, so we need to schedule its generation
++ this.schedule(
++ chunkX, chunkZ, requiredStatus, chunkHolder, tasks, minPriority
++ );
++
++ return true;
++ }
++
++ /**
++ * @deprecated Chunk tasks must be tied to coordinates in the future
++ */
++ @Deprecated
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run) {
++ return this.scheduleChunkTask(run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * @deprecated Chunk tasks must be tied to coordinates in the future
++ */
++ @Deprecated
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.queueRunnable(run, priority);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
++ return this.createChunkTask(chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run,
++ final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.createTask(run, priority);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
++ return this.mainThreadExecutor.queueRunnable(run);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run,
++ final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.queueRunnable(run, priority);
++ }
++
++ public void executeTasksUntil(final BooleanSupplier exit) {
++ if (Bukkit.isPrimaryThread()) {
++ this.mainThreadExecutor.executeConditionally(exit);
++ } else {
++ long counter = 1L;
++ while (!exit.getAsBoolean()) {
++ counter = ConcurrentUtil.linearLongBackoff(counter, 100_000L, 5_000_000L); // 100us, 5ms
++ }
++ }
++ }
++
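++ // halts all chunk system executors; when sync is true, spin-waits (with linear backoff) up to maxWaitNS
++ // for in-flight tasks to drain, returning whether everything stopped in time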
++ public boolean halt(final boolean sync, final long maxWaitNS) {
++ this.lightExecutor.halt();
++ this.genExecutor.halt();
++ this.parallelGenExecutor.halt();
++ this.loadExecutor.halt();
++ final long time = System.nanoTime();
++ if (sync) {
++ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
++ if (
++ !this.lightExecutor.isActive() &&
++ !this.genExecutor.isActive() &&
++ !this.parallelGenExecutor.isActive() &&
++ !this.loadExecutor.isActive()
++ ) {
++ return true;
++ }
++ if ((System.nanoTime() - time) >= maxWaitNS) {
++ return false;
++ }
++ }
++ }
++
++ return true;
++ }
++
++ public static final ArrayDeque<ChunkInfo> WAITING_CHUNKS = new ArrayDeque<>(); // stack
++
++ public static final class ChunkInfo {
++
++ public final int chunkX;
++ public final int chunkZ;
++ public final ServerLevel world;
++
++ public ChunkInfo(final int chunkX, final int chunkZ, final ServerLevel world) {
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.world = world;
++ }
++
++ @Override
++ public String toString() {
++ return "[( " + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "']";
++ }
++ }
++
++ public static void pushChunkWait(final ServerLevel world, final int chunkX, final int chunkZ) {
++ synchronized (WAITING_CHUNKS) {
++ WAITING_CHUNKS.push(new ChunkInfo(chunkX, chunkZ, world));
++ }
++ }
++
++ public static void popChunkWait() {
++ synchronized (WAITING_CHUNKS) {
++ WAITING_CHUNKS.pop();
++ }
++ }
++
++ public static ChunkInfo[] getChunkInfos() {
++ synchronized (WAITING_CHUNKS) {
++ return WAITING_CHUNKS.toArray(new ChunkInfo[0]);
++ }
++ }
++
++ public static void dumpAllChunkLoadInfo(final boolean longPrint) {
++ final ChunkInfo[] chunkInfos = getChunkInfos();
++ if (chunkInfos.length > 0) {
++ LOGGER.error("Chunk wait task info below: ");
++ for (final ChunkInfo chunkInfo : chunkInfos) {
++ final NewChunkHolder holder = chunkInfo.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkInfo.chunkX, chunkInfo.chunkZ);
++ LOGGER.error("Chunk wait: " + chunkInfo);
++ LOGGER.error("Chunk holder: " + holder);
++ }
++
++ if (longPrint) {
++ final File file = new File(new File(new File("."), "debug"), "chunks-watchdog.txt");
++ LOGGER.error("Writing chunk information dump to " + file);
++ try {
++ MCUtil.dumpChunks(file, true);
++ LOGGER.error("Successfully written chunk information!");
++ } catch (final Throwable thr) {
++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
++ }
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..73ce0909bd89244835a0d0f2030a25871461f1e0
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+@@ -0,0 +1,209 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.datafixers.util.Either;
++import com.mojang.logging.LogUtils;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerChunkCache;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.List;
++import java.util.Map;
++import java.util.concurrent.CompletableFuture;
++
++public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask implements Runnable {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ protected final ChunkAccess fromChunk;
++ protected final ChunkStatus fromStatus;
++ protected final ChunkStatus toStatus;
++ protected final List<ChunkAccess> neighbours;
++
++ protected final PrioritisedExecutor.PrioritisedTask generateTask;
++
++ public ChunkUpgradeGenericStatusTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final ChunkAccess chunk, final List<ChunkAccess> neighbours,
++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.fromChunk = chunk;
++ this.fromStatus = chunk.getStatus();
++ this.toStatus = toStatus;
++ this.neighbours = neighbours;
++ this.generateTask = (this.toStatus.isParallelCapable ? this.scheduler.parallelGenExecutor : this.scheduler.genExecutor)
++ .createTask(this, priority);
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return this.toStatus;
++ }
++
++ private boolean isEmptyTask() {
++ // must use fromStatus here to avoid any race condition with run() overwriting the status
++ final boolean generation = !this.fromStatus.isOrAfter(this.toStatus);
++ return (generation && this.toStatus.isEmptyGenStatus()) || (!generation && this.toStatus.isEmptyLoadStatus());
++ }
++
++ @Override
++ public void run() {
++ final ChunkAccess chunk = this.fromChunk;
++
++ final ServerChunkCache serverChunkCache = this.world.chunkSource;
++ final ChunkMap chunkMap = serverChunkCache.chunkMap;
++
++ final CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completeFuture;
++
++ final boolean generation;
++ boolean completing = false;
++
++ // note: should optimise the case where the chunk does not need to execute the status, because
++ // schedule() calls this synchronously if it will run through that path
++
++ try {
++ generation = !chunk.getStatus().isOrAfter(this.toStatus);
++ if (generation) {
++ if (this.toStatus.isEmptyGenStatus()) {
++ if (chunk instanceof ProtoChunk) {
++ ((ProtoChunk)chunk).setStatus(this.toStatus);
++ }
++ completing = true;
++ this.complete(chunk, null);
++ return;
++ }
++ completeFuture = this.toStatus.generate(Runnable::run, this.world, chunkMap.generator, chunkMap.structureTemplateManager,
++ serverChunkCache.getLightEngine(), null, this.neighbours, false)
++ .whenComplete((final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either, final Throwable throwable) -> {
++ final ChunkAccess newChunk = (either == null) ? null : either.left().orElse(null);
++ if (newChunk instanceof ProtoChunk) {
++ ((ProtoChunk)newChunk).setStatus(ChunkUpgradeGenericStatusTask.this.toStatus);
++ }
++ }
++ );
++ } else {
++ if (this.toStatus.isEmptyLoadStatus()) {
++ completing = true;
++ this.complete(chunk, null);
++ return;
++ }
++ completeFuture = this.toStatus.load(this.world, chunkMap.structureTemplateManager, serverChunkCache.getLightEngine(), null, chunk);
++ }
++ } catch (final Throwable throwable) {
++ if (!completing) {
++ this.complete(null, throwable);
++
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Target status", ChunkTaskScheduler.stringIfNull(this.toStatus),
++ "From status", ChunkTaskScheduler.stringIfNull(this.fromStatus),
++ "Generation task", this
++ ), throwable);
++
++ if (!(throwable instanceof ThreadDeath)) {
++ LOGGER.error("Failed to complete status for chunk: status:" + this.toStatus + ", chunk: (" + this.chunkX + "," + this.chunkZ + "), world: " + this.world.getWorld().getName(), throwable);
++ } else {
++ // ensure the chunk system can respond, then die
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ if (!completeFuture.isDone() && !this.toStatus.warnedAboutNoImmediateComplete.getAndSet(true)) {
++ LOGGER.warn("Future status not complete after scheduling: " + this.toStatus.toString() + ", generate: " + generation);
++ }
++
++ final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either;
++ final ChunkAccess newChunk;
++
++ try {
++ either = completeFuture.join();
++ newChunk = (either == null) ? null : either.left().orElse(null);
++ } catch (final Throwable throwable) {
++ this.complete(null, throwable);
++ // ensure the chunk system can respond, then die
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ if (newChunk == null) {
++ this.complete(null, new IllegalStateException("Chunk for status: " + ChunkUpgradeGenericStatusTask.this.toStatus.toString() + ", generation: " + generation + " should not be null! Either: " + either).fillInStackTrace());
++ return;
++ }
++
++ this.complete(newChunk, null);
++ }
++
++ protected volatile boolean scheduled;
++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkUpgradeGenericStatusTask.class, "scheduled", boolean.class);
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkUpgradeGenericStatusTask)this, true)) {
++ throw new IllegalStateException("Cannot double call schedule()");
++ }
++ if (this.isEmptyTask()) {
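++ // empty statuses have no work to execute, so run the completion inline instead of paying for a queue round-trip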
++ if (this.generateTask.cancel()) {
++ this.run();
++ }
++ } else {
++ this.generateTask.queue();
++ }
++ }
++
++ @Override
++ public void cancel() {
++ if (this.generateTask.cancel()) {
++ this.complete(null, null);
++ }
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.generateTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.raisePriority(priority);
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..396d72c00e47cf1669ae20dc839c1c961b1f262a
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
+@@ -0,0 +1,746 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.completable.Completable;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.function.BiConsumer;
++
++ public abstract class GenericDataLoadTask<OnMain, FinalCompletion> {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ protected static final CompoundTag CANCELLED_DATA = new CompoundTag();
++
++ // reference count is the upper 32 bits
++ protected final AtomicLong stageAndReferenceCount = new AtomicLong(STAGE_NOT_STARTED);
++
++ protected static final long STAGE_MASK = 0xFFFFFFFFL;
++ protected static final long STAGE_CANCELLED = 0xFFFFFFFFL;
++ protected static final long STAGE_NOT_STARTED = 0L;
++ protected static final long STAGE_LOADING = 1L;
++ protected static final long STAGE_PROCESSING = 2L;
++ protected static final long STAGE_COMPLETED = 3L;
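++ // e.g. a value of ((2L << 32) | STAGE_LOADING) encodes two outstanding references with the task in the loading stage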
++
++ // for loading data off disk
++ protected final LoadDataFromDiskTask loadDataFromDiskTask;
++ // processing off-main
++ protected final PrioritisedExecutor.PrioritisedTask processOffMain;
++ // processing on-main
++ protected final PrioritisedExecutor.PrioritisedTask processOnMain;
++
++ protected final ChunkTaskScheduler scheduler;
++ protected final ServerLevel world;
++ protected final int chunkX;
++ protected final int chunkZ;
++ protected final RegionFileIOThread.RegionFileType type;
++
++ public GenericDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final RegionFileIOThread.RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ this.scheduler = scheduler;
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.type = type;
++
++ final ProcessOnMainTask mainTask;
++ if (this.hasOnMain()) {
++ mainTask = new ProcessOnMainTask();
++ this.processOnMain = this.createOnMain(mainTask, priority);
++ } else {
++ mainTask = null;
++ this.processOnMain = null;
++ }
++
++ final ProcessOffMainTask offMainTask;
++ if (this.hasOffMain()) {
++ offMainTask = new ProcessOffMainTask(mainTask);
++ this.processOffMain = this.createOffMain(offMainTask, priority);
++ } else {
++ offMainTask = null;
++ this.processOffMain = null;
++ }
++
++ if (this.processOffMain == null && this.processOnMain == null) {
++ throw new IllegalStateException("Illegal class implementation: " + this.getClass().getName() + ", should be able to schedule at least one task!");
++ }
++
++ this.loadDataFromDiskTask = new LoadDataFromDiskTask(world, chunkX, chunkZ, type, new DataLoadCallback(offMainTask, mainTask), priority);
++ }
++
++ public static final record TaskResult<L, R>(L left, R right) {}
++
++ protected abstract boolean hasOffMain();
++
++ protected abstract boolean hasOnMain();
++
++ protected abstract PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority);
++
++ protected abstract PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority);
++
++ protected abstract TaskResult<OnMain, Throwable> runOffMain(final CompoundTag data, final Throwable throwable);
++
++ protected abstract TaskResult<FinalCompletion, Throwable> runOnMain(final OnMain data, final Throwable throwable);
++
++ protected abstract void onComplete(final TaskResult<FinalCompletion, Throwable> result);
++
++ protected abstract TaskResult<FinalCompletion, Throwable> completeOnMainOffMain(final OnMain data, final Throwable throwable);
++
++ @Override
++ public String toString() {
++ return "GenericDataLoadTask{class: " + this.getClass().getName() + ", world: " + this.world.getWorld().getName() +
++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
++ ", type: " + this.type.toString() + "}";
++ }
++
++ public PrioritisedExecutor.Priority getPriority() {
++ if (this.processOnMain != null) {
++ return this.processOnMain.getPriority();
++ } else {
++ return this.processOffMain.getPriority();
++ }
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ if (this.processOffMain != null) {
++ this.processOffMain.lowerPriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.lowerPriority(priority);
++ }
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ this.loadDataFromDiskTask.raisePriority(priority);
++ if (this.processOffMain != null) {
++ this.processOffMain.setPriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.setPriority(priority);
++ }
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ this.loadDataFromDiskTask.raisePriority(priority);
++ if (this.processOffMain != null) {
++ this.processOffMain.raisePriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.raisePriority(priority);
++ }
++ }
++
++ // returns whether scheduleNow() needs to be called
++ public boolean schedule(final boolean delay) {
++ if (this.stageAndReferenceCount.get() != STAGE_NOT_STARTED ||
++ !this.stageAndReferenceCount.compareAndSet(STAGE_NOT_STARTED, (1L << 32) | STAGE_LOADING)) {
++ // try and increment reference count
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) == STAGE_CANCELLED || (curr & STAGE_MASK) == STAGE_COMPLETED) {
++ // cancelled or completed, nothing to do here
++ return false;
++ }
++
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, curr + (1L << 32)))) {
++ // successful
++ return false;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ if (!delay) {
++ this.scheduleNow();
++ return false;
++ }
++ return true;
++ }
++
++ public void scheduleNow() {
++ this.loadDataFromDiskTask.schedule(); // will schedule the rest
++ }
++
++ // assumes the current stage cannot be completed
++ // returns false if cancelled, returns true if can proceed
++ private boolean advanceStage(final long expect, final long to) {
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) != expect) {
++ // must be cancelled
++ return false;
++ }
++
++ final long newVal = (curr & ~STAGE_MASK) | to;
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ return true;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public boolean cancel() {
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) == STAGE_COMPLETED || (curr & STAGE_MASK) == STAGE_CANCELLED) {
++ return false;
++ }
++
++ if ((curr & STAGE_MASK) == STAGE_NOT_STARTED || (curr & ~STAGE_MASK) == (1L << 32)) {
++ // no other references, so we can cancel
++ final long newVal = STAGE_CANCELLED;
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ this.loadDataFromDiskTask.cancel();
++ if (this.processOffMain != null) {
++ this.processOffMain.cancel();
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.cancel();
++ }
++ this.onComplete(null);
++ return true;
++ }
++ } else {
++ if ((curr & ~STAGE_MASK) == (0L << 32)) {
++ throw new IllegalStateException("Reference count cannot be zero here");
++ }
++ // just decrease the reference count
++ final long newVal = curr - (1L << 32);
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ return false;
++ }
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ protected final class DataLoadCallback implements BiConsumer<CompoundTag, Throwable> {
++
++ protected final ProcessOffMainTask offMainTask;
++ protected final ProcessOnMainTask onMainTask;
++
++ public DataLoadCallback(final ProcessOffMainTask offMainTask, final ProcessOnMainTask onMainTask) {
++ this.offMainTask = offMainTask;
++ this.onMainTask = onMainTask;
++ }
++
++ @Override
++ public void accept(final CompoundTag compoundTag, final Throwable throwable) {
++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
++ // don't try to schedule further
++ return;
++ }
++
++ try {
++ if (compoundTag == CANCELLED_DATA) {
++ // cancelled, except this isn't possible
++ LOGGER.error("Data callback says cancelled, but stage does not?");
++ return;
++ }
++
++ // get off of the regionfile callback ASAP, no clue what locks are held right now...
++ if (GenericDataLoadTask.this.processOffMain != null) {
++ this.offMainTask.data = compoundTag;
++ this.offMainTask.throwable = throwable;
++ GenericDataLoadTask.this.processOffMain.queue();
++ return;
++ } else {
++ // no off-main task, so go straight to main
++ this.onMainTask.data = (OnMain)compoundTag;
++ this.onMainTask.throwable = throwable;
++ GenericDataLoadTask.this.processOnMain.queue();
++ }
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed I/O callback for task: " + GenericDataLoadTask.this.toString(), thr2);
++ GenericDataLoadTask.this.scheduler.unrecoverableChunkSystemFailure(
++ GenericDataLoadTask.this.chunkX, GenericDataLoadTask.this.chunkZ, Map.of(
++ "Callback throwable", ChunkTaskScheduler.stringIfNull(throwable)
++ ), thr2);
++ }
++ }
++ }
++
++ protected final class ProcessOffMainTask implements Runnable {
++
++ protected CompoundTag data;
++ protected Throwable throwable;
++ protected final ProcessOnMainTask schedule;
++
++ public ProcessOffMainTask(final ProcessOnMainTask schedule) {
++ this.schedule = schedule;
++ }
++
++ @Override
++ public void run() {
++ if (!GenericDataLoadTask.this.advanceStage(STAGE_LOADING, this.schedule == null ? STAGE_COMPLETED : STAGE_PROCESSING)) {
++ // cancelled
++ return;
++ }
++ final TaskResult<OnMain, Throwable> newData = GenericDataLoadTask.this.runOffMain(this.data, this.throwable);
++
++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
++ // don't try to schedule further
++ return;
++ }
++
++ if (this.schedule != null) {
++ final TaskResult<FinalCompletion, Throwable> syncComplete = GenericDataLoadTask.this.completeOnMainOffMain(newData.left, newData.right);
++
++ if (syncComplete != null) {
++ if (GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
++ GenericDataLoadTask.this.onComplete(syncComplete);
++ } // else: cancelled
++ return;
++ }
++
++ this.schedule.data = newData.left;
++ this.schedule.throwable = newData.right;
++
++ GenericDataLoadTask.this.processOnMain.queue();
++ } else {
++ GenericDataLoadTask.this.onComplete((TaskResult)newData);
++ }
++ }
++ }
++
++ protected final class ProcessOnMainTask implements Runnable {
++
++ protected OnMain data;
++ protected Throwable throwable;
++
++ @Override
++ public void run() {
++ if (!GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
++ // cancelled
++ return;
++ }
++ final TaskResult<FinalCompletion, Throwable> result = GenericDataLoadTask.this.runOnMain(this.data, this.throwable);
++
++ GenericDataLoadTask.this.onComplete(result);
++ }
++ }
++
++ public static final class LoadDataFromDiskTask {
++
++ protected volatile int priority;
++ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(LoadDataFromDiskTask.class, "priority", int.class);
++
++ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 0;
++ protected static final int PRIORITY_LOAD_SCHEDULED = Integer.MIN_VALUE >>> 1;
++ protected static final int PRIORITY_UNLOAD_SCHEDULED = Integer.MIN_VALUE >>> 2;
++
++ protected static final int PRIORITY_FLAGS = ~Character.MAX_VALUE;
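++ // the lower 16 bits hold the scheduled priority value; the flag bits above mark executed/load-scheduled/unload-scheduled state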
++
++ protected final int getPriorityVolatile() {
++ return (int)PRIORITY_HANDLE.getVolatile((LoadDataFromDiskTask)this);
++ }
++
++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
++ return (int)PRIORITY_HANDLE.compareAndExchange((LoadDataFromDiskTask)this, (int)expect, (int)update);
++ }
++
++ protected final int getAndOrPriorityVolatile(final int val) {
++ return (int)PRIORITY_HANDLE.getAndBitwiseOr((LoadDataFromDiskTask)this, (int)val);
++ }
++
++ protected final void setPriorityPlain(final int val) {
++ PRIORITY_HANDLE.set((LoadDataFromDiskTask)this, (int)val);
++ }
++
++ private final ServerLevel world;
++ private final int chunkX;
++ private final int chunkZ;
++
++ private final RegionFileIOThread.RegionFileType type;
++ private Cancellable dataLoadTask;
++ private Cancellable dataUnloadCancellable;
++ private DelayedPrioritisedTask dataUnloadTask;
++
++ private final BiConsumer<CompoundTag, Throwable> onComplete;
++
++ // onComplete should be caller sensitive, it may complete synchronously with schedule() - which does
++ // hold a priority lock.
++ public LoadDataFromDiskTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final RegionFileIOThread.RegionFileType type,
++ final BiConsumer<CompoundTag, Throwable> onComplete,
++ final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.type = type;
++ this.onComplete = onComplete;
++ this.setPriorityPlain(priority.priority);
++ }
++
++ private void complete(final CompoundTag data, final Throwable throwable) {
++ try {
++ this.onComplete.accept(data, throwable);
++ } catch (final Throwable thr2) {
++ this.world.chunkTaskScheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable),
++ "Regionfile type", ChunkTaskScheduler.stringIfNull(this.type)
++ ), thr2);
++ if (thr2 instanceof ThreadDeath) {
++ throw (ThreadDeath)thr2;
++ }
++ }
++ }
++
++ protected boolean markExecuting() {
++ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0;
++ }
++
++ protected boolean isMarkedExecuted() {
++ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0;
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.lowerPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.lowerPriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (!priority.isHigherPriority(curr & ~PRIORITY_FLAGS)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.setPriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.raisePriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.raisePriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (!priority.isLowerPriority(curr & ~PRIORITY_FLAGS)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void cancel() {
++ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed already
++ return;
++ }
++
++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
++ // the write to dataLoadTask will check for the cancelled bit
++ if (this.dataUnloadCancellable != null) {
++ this.dataUnloadCancellable.cancel();
++ }
++
++ if (this.dataLoadTask != null) {
++ this.dataLoadTask.cancel();
++ }
++
++ this.complete(CANCELLED_DATA, null);
++ }
++
++ private final AtomicBoolean scheduled = new AtomicBoolean();
++
++ public void schedule() {
++ if (this.scheduled.getAndSet(true)) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++ int priority = this.getPriorityVolatile();
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled
++ return;
++ }
++
++ final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need
++ // to mark complete here to ensure we do not double complete
++ if (LoadDataFromDiskTask.this.markExecuting()) {
++ LoadDataFromDiskTask.this.complete(data, thr);
++ } // else: cancelled
++ };
++
++ final PrioritisedExecutor.Priority initialPriority = PrioritisedExecutor.Priority.getPriority(priority);
++ boolean scheduledUnload = false;
++
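++ // if an unload (save) of this chunk's data is currently in flight, prefer waiting on that in-flight
++ // data instead of issuing a fresh read from disk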
++ final NewChunkHolder holder = this.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ);
++ if (holder != null) {
++ final BiConsumer<CompoundTag, Throwable> unloadConsumer = (final CompoundTag data, final Throwable thr) -> {
++ if (data != null) {
++ consumer.accept(data, null);
++ } else {
++ // need to schedule task
++ LoadDataFromDiskTask.this.schedule(false, consumer, PrioritisedExecutor.Priority.getPriority(LoadDataFromDiskTask.this.getPriorityVolatile() & ~PRIORITY_FLAGS));
++ }
++ };
++ Cancellable unloadCancellable = null;
++ CompoundTag syncComplete = null;
++ final NewChunkHolder.UnloadTask unloadTask = holder.getUnloadTask(this.type); // can be null if no task exists
++ final Completable<CompoundTag> unloadCompletable = unloadTask == null ? null : unloadTask.completable();
++ if (unloadCompletable != null) {
++ unloadCancellable = unloadCompletable.addAsynchronousWaiter(unloadConsumer);
++ if (unloadCancellable == null) {
++ syncComplete = unloadCompletable.getResult();
++ }
++ }
++
++ if (syncComplete != null) {
++ consumer.accept(syncComplete, null);
++ return;
++ }
++
++ if (unloadCancellable != null) {
++ scheduledUnload = true;
++ this.dataUnloadCancellable = unloadCancellable;
++ this.dataUnloadTask = unloadTask.task();
++ }
++ }
++
++ this.schedule(scheduledUnload, consumer, initialPriority);
++ }
++
++ private void schedule(final boolean scheduledUnload, final BiConsumer<CompoundTag, Throwable> consumer, final PrioritisedExecutor.Priority initialPriority) {
++ int priority = this.getPriorityVolatile();
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled
++ return;
++ }
++
++ if (!scheduledUnload) {
++ this.dataLoadTask = RegionFileIOThread.loadDataAsync(
++ this.world, this.chunkX, this.chunkZ, this.type, consumer,
++ initialPriority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), initialPriority
++ );
++ }
++
++ int failures = 0;
++ for (;;) {
++ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | (scheduledUnload ? PRIORITY_UNLOAD_SCHEDULED : PRIORITY_LOAD_SCHEDULED)))) {
++ return;
++ }
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ if (this.dataUnloadCancellable != null) {
++ this.dataUnloadCancellable.cancel();
++ }
++
++ if (this.dataLoadTask != null) {
++ this.dataLoadTask.cancel();
++ }
++ return;
++ }
++
++ if (scheduledUnload) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.setPriority(PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
++ }
++ } else {
++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ /*
++ private static final class LoadDataPriorityHolder extends PriorityHolder {
++
++ protected final LoadDataFromDiskTask task;
++
++ protected LoadDataPriorityHolder(final PrioritisedExecutor.Priority priority, final LoadDataFromDiskTask task) {
++ super(priority);
++ this.task = task;
++ }
++
++ @Override
++ protected void cancelScheduled() {
++ final Cancellable dataLoadTask = this.task.dataLoadTask;
++ if (dataLoadTask != null) {
++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
++ // the write to dataLoadTask will check for the cancelled bit
++ this.task.dataLoadTask.cancel();
++ }
++ this.task.complete(CANCELLED_DATA, null);
++ }
++
++ @Override
++ protected PrioritisedExecutor.Priority getScheduledPriority() {
++ final LoadDataFromDiskTask task = this.task;
++ return RegionFileIOThread.getPriority(task.world, task.chunkX, task.chunkZ, task.type);
++ }
++
++ @Override
++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need
++ // to mark complete here to ensure we do not double complete
++ if (LoadDataPriorityHolder.this.markExecuting()) {
++ LoadDataPriorityHolder.this.task.complete(data, thr);
++ } // else: cancelled
++ };
++ task.dataLoadTask = RegionFileIOThread.loadDataAsync(
++ task.world, task.chunkX, task.chunkZ, task.type, consumer,
++ priority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), priority
++ );
++ if (this.isMarkedExecuted()) {
++ // if we are marked as completed, it could be:
++ // 1. we were cancelled
++ // 2. the consumer was completed
++ // in the 2nd case, cancel() does nothing
++ // in the 1st case, we ensure cancel() is called as it is possible for the cancelling thread
++ // to miss the field write here
++ task.dataLoadTask.cancel();
++ }
++ }
++
++ @Override
++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.lowerPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++
++ @Override
++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.setPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++
++ @Override
++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.raisePriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++ }
++ */
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..8013dd333e27aa5fd0beb431fa32491eec9f5246
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+@@ -0,0 +1,2077 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.completable.Completable;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.google.gson.JsonArray;
++import com.google.gson.JsonElement;
++import com.google.gson.JsonObject;
++import com.google.gson.JsonPrimitive;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ImposterProtoChunk;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.chunk.storage.ChunkSerializer;
++import net.minecraft.world.level.chunk.storage.EntityStorage;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.Objects;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.function.Consumer;
++
++public final class NewChunkHolder {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ public static final Thread.UncaughtExceptionHandler CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER = new Thread.UncaughtExceptionHandler() {
++ @Override
++ public void uncaughtException(final Thread thread, final Throwable throwable) {
++ if (!(throwable instanceof ThreadDeath)) {
++ LOGGER.error("Uncaught exception in thread " + thread.getName(), throwable);
++ }
++ }
++ };
++
++ public final ServerLevel world;
++ public final int chunkX;
++ public final int chunkZ;
++
++ public final ChunkTaskScheduler scheduler;
++
++ // load/unload state
++
++ // chunk data state
++
++ private ChunkEntitySlices entityChunk;
++ // entity chunk that is loaded, but not yet deserialized
++ private CompoundTag pendingEntityChunk;
++
++ ChunkEntitySlices loadInEntityChunk(final boolean transientChunk) {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot sync load entity data off-main");
++ final CompoundTag entityChunk;
++ final ChunkEntitySlices ret;
++ this.scheduler.schedulingLock.lock();
++ try {
++ if (this.entityChunk != null && (transientChunk || !this.entityChunk.isTransient())) {
++ return this.entityChunk;
++ }
++ final CompoundTag pendingEntityChunk = this.pendingEntityChunk;
++ if (!transientChunk && pendingEntityChunk == null) {
++ throw new IllegalStateException("Must load entity data from disk before loading in the entity chunk!");
++ }
++
++ if (this.entityChunk == null) {
++ ret = this.entityChunk = new ChunkEntitySlices(
++ this.world, this.chunkX, this.chunkZ, this.getChunkStatus(),
++ WorldUtil.getMinSection(this.world), WorldUtil.getMaxSection(this.world)
++ );
++
++ ret.setTransient(transientChunk);
++
++ this.world.getEntityLookup().entitySectionLoad(this.chunkX, this.chunkZ, ret);
++ } else {
++ // transientChunk = false here
++ ret = this.entityChunk;
++ this.entityChunk.setTransient(false);
++ }
++
++ if (!transientChunk) {
++ this.pendingEntityChunk = null;
++ entityChunk = pendingEntityChunk == EMPTY_ENTITY_CHUNK ? null : pendingEntityChunk;
++ } else {
++ entityChunk = null;
++ }
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (!transientChunk) {
++ if (entityChunk != null) {
++ final List