From 6d630052fda16e11b12dc543e2e29e31f4f72639 Mon Sep 17 00:00:00 2001
From: Lulu13022002 <41980282+Lulu13022002@users.noreply.github.com>
Date: Sat, 24 Sep 2022 18:34:20 +0200
Subject: [PATCH 001/278] Fix setEggCount method from TurtleLayEggEvent (#8385)
---
patches/api/0161-Turtle-API.patch | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/patches/api/0161-Turtle-API.patch b/patches/api/0161-Turtle-API.patch
index c2dfe44090..a8eec937cc 100644
--- a/patches/api/0161-Turtle-API.patch
+++ b/patches/api/0161-Turtle-API.patch
@@ -61,7 +61,7 @@ index 0000000000000000000000000000000000000000..021356d151ed638068e3e89b8cc77b37
+}
diff --git a/src/main/java/com/destroystokyo/paper/event/entity/TurtleLayEggEvent.java b/src/main/java/com/destroystokyo/paper/event/entity/TurtleLayEggEvent.java
new file mode 100644
-index 0000000000000000000000000000000000000000..a315c5185cd465dcf63c0ababef195da76dfc786
+index 0000000000000000000000000000000000000000..bcc8dba50e3a3df0206c4827bb468bf884837b8b
--- /dev/null
+++ b/src/main/java/com/destroystokyo/paper/event/entity/TurtleLayEggEvent.java
@@ -0,0 +1,87 @@
@@ -129,7 +129,7 @@ index 0000000000000000000000000000000000000000..a315c5185cd465dcf63c0ababef195da
+ cancelled = true;
+ return;
+ }
-+ eggCount = Math.min(eggCount, 4);
++ this.eggCount = Math.min(eggCount, 4);
+ }
+
+ @Override
--
2.39.5
From abe53a7eb477664aba5f32ff22d81f11ed48a44d Mon Sep 17 00:00:00 2001
From: Bjarne Koll
Date: Sun, 25 Sep 2022 02:01:17 +0200
Subject: [PATCH 002/278] Fix typos in isTickingWorlds API javadocs (#8382)
---
...Add-method-isTickingWorlds-to-Bukkit.patch | 20 +++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/patches/api/0383-Add-method-isTickingWorlds-to-Bukkit.patch b/patches/api/0383-Add-method-isTickingWorlds-to-Bukkit.patch
index 19074e3763..10c607bbbc 100644
--- a/patches/api/0383-Add-method-isTickingWorlds-to-Bukkit.patch
+++ b/patches/api/0383-Add-method-isTickingWorlds-to-Bukkit.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] Add method isTickingWorlds() to Bukkit.
diff --git a/src/main/java/org/bukkit/Bukkit.java b/src/main/java/org/bukkit/Bukkit.java
-index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..b5a7d6ab4e458843f2e163bf06b5668627012f91 100644
+index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..232c08c9a588d957d90f198ce479e57615c6e650 100644
--- a/src/main/java/org/bukkit/Bukkit.java
+++ b/src/main/java/org/bukkit/Bukkit.java
@@ -753,12 +753,26 @@ public final class Bukkit {
@@ -14,7 +14,7 @@ index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..b5a7d6ab4e458843f2e163bf06b56686
+ // Paper start
+ /**
-+ * Gets whether the worlds are being ticked right not or not.
++ * Gets whether the worlds are being ticked right now.
+ *
+ * @return true if the worlds are being ticked, false otherwise.
+ */
@@ -31,7 +31,7 @@ index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..b5a7d6ab4e458843f2e163bf06b56686
* getWorld(creator.name()).
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param creator the options to use when creating the world
* @return newly created or loaded world
@@ -41,7 +41,7 @@ index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..b5a7d6ab4e458843f2e163bf06b56686
* Unloads a world with the given name.
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param name Name of the world to unload
* @param save whether to save the chunks before unloading
@@ -51,12 +51,12 @@ index 840aaf9e8fc828b5a7ea02252038c6524680f2e0..b5a7d6ab4e458843f2e163bf06b56686
* Unloads the given world.
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param world the world to unload
* @param save whether to save the chunks before unloading
diff --git a/src/main/java/org/bukkit/Server.java b/src/main/java/org/bukkit/Server.java
-index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..1982fc2d7f1cb80d3e324ee283211b251a976c6e 100644
+index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..e43fef0152468944d8a33036344a43e95fe58476 100644
--- a/src/main/java/org/bukkit/Server.java
+++ b/src/main/java/org/bukkit/Server.java
@@ -622,34 +622,55 @@ public interface Server extends PluginMessageRecipient, net.kyori.adventure.audi
@@ -65,7 +65,7 @@ index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..1982fc2d7f1cb80d3e324ee283211b25
+ // Paper start
+ /**
-+ * Gets whether the worlds are being ticked right not or not.
++ * Gets whether the worlds are being ticked right now.
+ *
+ * @return true if the worlds are being ticked, false otherwise.
+ */
@@ -80,7 +80,7 @@ index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..1982fc2d7f1cb80d3e324ee283211b25
* getWorld(creator.name()).
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param creator the options to use when creating the world
* @return newly created or loaded world
@@ -93,7 +93,7 @@ index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..1982fc2d7f1cb80d3e324ee283211b25
* Unloads a world with the given name.
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param name Name of the world to unload
* @param save whether to save the chunks before unloading
@@ -106,7 +106,7 @@ index da5cab4246bd253fcc4e4d9574bdae1867ebb5ab..1982fc2d7f1cb80d3e324ee283211b25
* Unloads the given world.
+ *
+ * Do note that un/loading worlds mid-tick may have potential side effects, we strongly recommend
-+ * ensuring that you're not loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
++ * ensuring that you're not un/loading worlds midtick by checking {@link Bukkit#isTickingWorlds()}
*
* @param world the world to unload
* @param save whether to save the chunks before unloading
--
2.39.5
From 01a13871deefa50e186a10b63f71c5e0459e7d30 Mon Sep 17 00:00:00 2001
From: Spottedleaf <6100722+Spottedleaf@users.noreply.github.com>
Date: Mon, 26 Sep 2022 01:02:51 -0700
Subject: [PATCH 003/278] Rewrite chunk system (#8177)
Patch documentation to come
Issues with the old system that are fixed now:
- World generation does not scale with cpu cores effectively.
- Relies on the main thread for scheduling and maintaining chunk state, dropping chunk load/generate rates at lower tps.
- Unreliable prioritisation of chunk gen/load calls that block the main thread.
- Shutdown logic is utterly unreliable, as it has to wait for all chunks to unload - is it guaranteed that the chunk system is in a state on shutdown that it can reliably do this? Watchdog shutdown also typically failed due to thread checks, which is now resolved.
- Saving of data is not unified (i.e can save chunk data without saving entity data, poses problems for desync if shutdown is really abnormal.
- Entities are not loaded with chunks. This caused quite a bit of headache for Chunk#getEntities API, but now the new chunk system loads entities with chunks so that they are ready whenever the chunk loads in. Effectively brings the behavior back to 1.16 era, but still storing entities in their own separate regionfiles.
The above list is not complete. The patch documentation will complete it.
New chunk system hard relies on starlight and dataconverter, and most importantly the new concurrent utilities in ConcurrentUtil.
Some of the old async chunk i/o interface (i.e the old file io thread reroutes _some_ calls to the new file io thread) is kept for plugin compat reasons. It will be removed in the next major version of minecraft.
The old legacy chunk system patches have been moved to the removed folder in case we need them again.
---
.../0014-ChunkMapDistance-CME.patch | 0
.../0015-Do-not-copy-visible-chunks.patch | 0
.../0016-Chunk-debug-command.patch | 0
...7-Make-CallbackExecutor-strict-again.patch | 0
...19-Asynchronous-chunk-IO-and-loading.patch | 0
...k-Priority-Urgency-System-for-Chunks.patch | 0
...layer-View-Distance-API-placeholders.patch | 0
...more-aggressive-in-the-chunk-unload-.patch | 0
...isPrimaryThread-and-MinecraftServer-.patch | 0
.../0355-Fix-Light-Command.patch | 0
...393-Optimise-ArraySetSorted-removeIf.patch | 0
...-Chunk-Post-Processing-deadlock-risk.patch | 0
...rLevels-chunk-level-checking-methods.patch | 0
...mprove-Chunk-Status-Transition-Speed.patch | 0
...-server-to-unload-chunks-at-request-.patch | 0
...le-recursion-for-chunkholder-updates.patch | 0
...chunks-refusing-to-unload-at-low-TPS.patch | 0
...ket-level-changes-while-unloading-pl.patch | 0
...ket-level-changes-when-updating-chun.patch | 0
...alls-removing-tickets-for-sync-loads.patch | 0
...ite-entity-bounding-box-lookup-calls.patch | 0
...dition-of-entities-to-entity-ticklis.patch | 0
...ntity-loads-in-CraftChunk-getEntitie.patch | 0
.../0801-Actually-unload-POI-data.patch | 0
...0845-Replace-ticket-level-propagator.patch | 0
...3-Replace-player-chunk-loader-system.patch | 0
.../0859-Fix-save-problems-on-shutdown.patch | 0
patches/server/0004-Paper-config-files.patch | 17 +-
patches/server/0006-ConcurrentUtil.patch | 160 +-
patches/server/0008-MC-Utils.patch | 221 +-
patches/server/0009-Adventure.patch | 36 +-
patches/server/0012-Timings-v2.patch | 10 +-
...> 0013-Rewrite-dataconverter-system.patch} | 20 +-
...ight-engine.patch => 0014-Starlight.patch} | 447 +-
...eted.patch => 0015-Not-implemeneted.patch} | 0
.../server/0016-Rewrite-chunk-system.patch | 18056 ++++++++++++++++
...option-to-load-extra-plugin-jars-no.patch} | 2 +-
...ctus-bamboo-and-reed-growth-heights.patch} | 0
...unk-Unloads-based-on-Player-Movement.patch | 89 -
...igurable-baby-zombie-movement-speed.patch} | 0
...20-Configurable-fishing-time-ranges.patch} | 0
...-mobs-to-jump-and-take-water-damage.patch} | 0
...despawn-distances-for-living-entiti.patch} | 0
...-Allow-for-toggling-of-spawn-chunks.patch} | 2 +-
...k-and-tnt-entities-at-the-specified.patch} | 0
...ent-crashes-server-lists-and-Mojang.patch} | 16 +-
...0026-Implement-Paper-VersionChecker.patch} | 0
...-version-history-to-version-command.patch} | 2 +-
...=> 0028-Player-affects-spawning-API.patch} | 10 +-
...29-Further-improve-server-tick-loop.patch} | 25 +-
...30-Only-refresh-abilities-if-needed.patch} | 4 +-
...API.patch => 0031-Entity-Origin-API.patch} | 10 +-
...vent-tile-entity-and-entity-crashes.patch} | 8 +-
...figurable-top-of-nether-void-damage.patch} | 4 +-
...-before-converting-and-renaming-pla.patch} | 2 +-
... => 0035-Always-tick-falling-blocks.patch} | 2 +-
...ch => 0036-Configurable-end-credits.patch} | 4 +-
...explosions-processing-dead-entities.patch} | 0
...s.patch => 0038-Optimize-explosions.patch} | 6 +-
...=> 0039-Disable-explosion-knockback.patch} | 0
...under.patch => 0040-Disable-thunder.patch} | 4 +-
....patch => 0041-Disable-ice-and-snow.patch} | 4 +-
...-Configurable-mob-spawner-tick-rate.patch} | 0
...3-Implement-PlayerLocaleChangeEvent.patch} | 10 +-
...patch => 0044-Add-BeaconEffectEvent.patch} | 0
...igurable-container-update-tick-rate.patch} | 6 +-
...0046-Use-UserCache-for-player-heads.patch} | 0
...> 0047-Disable-spigot-tick-limiters.patch} | 4 +-
...=> 0048-Add-PlayerInitialSpawnEvent.patch} | 2 +-
...rable-Disabling-Cat-Chest-Detection.patch} | 0
...0-Ensure-commands-are-not-ran-async.patch} | 8 +-
...hunks-are-slime-spawn-chunks-toggle.patch} | 4 +-
...ch => 0052-Expose-server-CommandMap.patch} | 2 +-
...-informative-in-maxHealth-exception.patch} | 0
...> 0054-Ensure-inv-drag-is-in-bounds.patch} | 0
...0055-Player-Tab-List-and-Title-APIs.patch} | 4 +-
...d-configurable-portal-search-radius.patch} | 4 +-
...patch => 0057-Add-velocity-warnings.patch} | 8 +-
...le-inter-world-teleportation-safety.patch} | 4 +-
... 0059-Add-exception-reporting-event.patch} | 22 +-
...don-t-need-to-when-cerealising-text.patch} | 0
...reboards-for-non-players-by-default.patch} | 4 +-
...orking-with-arrows-stuck-in-living-.patch} | 0
....patch => 0063-Chunk-Save-Reattempt.patch} | 10 +-
... => 0064-Complete-resource-pack-API.patch} | 4 +-
...ding-permissions.yml-before-plugins.patch} | 2 +-
...low-Reloading-of-Custom-Permissions.patch} | 2 +-
...h => 0067-Remove-Metadata-on-reload.patch} | 2 +-
...68-Handle-Item-Meta-Inconsistencies.patch} | 0
...rable-Non-Player-Arrow-Despawn-Rate.patch} | 0
...atch => 0070-Add-World-Util-Methods.patch} | 4 +-
...-Custom-replacement-for-eaten-items.patch} | 0
...h-absorb-values-and-repair-bad-data.patch} | 4 +-
...73-Use-a-Shared-Random-for-Entities.patch} | 4 +-
...e-spawn-chances-for-skeleton-horses.patch} | 4 +-
...dBounds-and-getBlockState-for-inlin.patch} | 26 +-
...kPhysicsEvent-if-a-plugin-has-a-lis.patch} | 10 +-
...ntity-AddTo-RemoveFrom-World-Events.patch} | 6 +-
...8-Configurable-Chunk-Inhabited-Time.patch} | 4 +-
...t.patch => 0079-EntityPathfindEvent.patch} | 0
...gionFileCache-and-make-configurable.patch} | 2 +-
...-Do-not-load-chunks-for-Pathfinding.patch} | 0
...082-Add-PlayerUseUnknownEntityEvent.patch} | 0
...Configurable-Grass-Spread-Tick-Rate.patch} | 0
...-BlockPlaceEvent-triggering-physics.patch} | 4 +-
...its.patch => 0085-Optimize-DataBits.patch} | 0
...illa-per-world-scoreboard-coloring-.patch} | 0
... 0087-Configurable-Player-Collision.patch} | 4 +-
...nt-to-allow-plugins-to-handle-clien.patch} | 0
...> 0089-Configurable-RCON-IP-address.patch} | 0
...tyRegainHealthEvent-isFastRegen-API.patch} | 0
...to-configure-frosted_ice-properties.patch} | 0
...possibility-for-getServer-singleton.patch} | 4 +-
...tem-frames-performance-and-bug-fixe.patch} | 4 +-
...API-Replenishable-Lootables-Feature.patch} | 2 +-
...-scoreboard-teams-to-scoreboard.dat.patch} | 0
...em-property-for-disabling-watchdoge.patch} | 4 +-
... 0097-Async-GameProfileCache-saving.patch} | 8 +-
...-Optional-TNT-doesn-t-move-in-water.patch} | 0
...-redstone-torch-rapid-clock-removal.patch} | 2 +-
...h => 0100-Add-server-name-parameter.patch} | 0
...n-Wither-Death-sounds-to-same-world.patch} | 18 +-
...tch => 0102-Fix-Old-Sign-Conversion.patch} | 0
...locking-on-Network-Manager-creation.patch} | 0
...-profiles-that-have-no-UUID-and-no-.patch} | 0
...etting-for-proxy-online-mode-status.patch} | 2 +-
...timise-BlockState-s-hashCode-equals.patch} | 0
...nfigurable-packet-in-spam-threshold.patch} | 0
...8-Configurable-flying-kick-messages.patch} | 0
...nt.patch => 0109-Add-EntityZapEvent.patch} | 0
...-from-ArmorStand-and-SpawnEgg-items.patch} | 0
...11-Cache-user-authenticator-threads.patch} | 0
...-Allow-Reloading-of-Command-Aliases.patch} | 2 +-
...-Add-source-to-PlayerExpChangeEvent.patch} | 0
... => 0114-Add-ProjectileCollideEvent.patch} | 0
...ent-Pathfinding-out-of-World-Border.patch} | 0
...mize-World.isLoaded-BlockPosition-Z.patch} | 2 +-
...Bound-Treasure-Maps-to-World-Border.patch} | 2 +-
...igurable-Cartographer-Treasure-Maps.patch} | 0
... => 0119-Optimize-ItemStack.isEmpty.patch} | 0
...o-control-if-armour-stands-can-move.patch} | 0
...=> 0121-String-based-Action-Bar-API.patch} | 4 +-
...2-Properly-fix-item-duplication-bug.patch} | 4 +-
...-API-s.patch => 0123-Firework-API-s.patch} | 0
... 0124-PlayerTeleportEndGatewayEvent.patch} | 0
...ovide-E-TE-Chunk-count-stat-methods.patch} | 4 +-
...h => 0126-Enforce-Sync-Player-Saves.patch} | 2 +-
...low-entities-to-ride-themselves-572.patch} | 4 +-
...I-for-Reason-Source-Triggering-play.patch} | 4 +-
...patch => 0129-Cap-Entity-Collisions.patch} | 2 +-
...-CraftScheduler-Async-Task-Debugger.patch} | 0
...> 0131-Do-not-let-armorstands-drown.patch} | 0
...e-async-calls-to-restart-the-server.patch} | 8 +-
...e-parrots-stay-on-shoulders-despite.patch} | 0
...-option-to-prevent-player-names-fro.patch} | 2 +-
...leAppender-for-console-improvements.patch} | 12 +-
...rable-option-to-disable-creeper-lin.patch} | 0
....patch => 0137-Item-canEntityPickup.patch} | 0
...layerPickupItemEvent-setFlyAtPlayer.patch} | 0
...> 0139-PlayerAttemptPickupItemEvent.patch} | 0
...profile-lookups-to-worldgen-threads.patch} | 0
...tch => 0141-Add-UnknownCommandEvent.patch} | 2 +-
...tch => 0142-Basic-PlayerProfile-API.patch} | 8 +-
... 0143-Shoulder-Entities-Release-API.patch} | 0
...patch => 0144-Profile-Lookup-Events.patch} | 0
...layer-logins-during-server-shutdown.patch} | 0
...patch => 0146-Entity-fromMobSpawner.patch} | 6 +-
...7-Improve-the-Saddle-API-for-Horses.patch} | 0
...plement-ensureServerConversions-API.patch} | 0
...> 0149-Implement-getI18NDisplayName.patch} | 0
...=> 0150-ProfileWhitelistVerifyEvent.patch} | 2 +-
...ch => 0151-Fix-this-stupid-bullshit.patch} | 0
...atch => 0152-LivingEntity-setKiller.patch} | 0
...wns-should-honor-nametags-and-leash.patch} | 0
...mer-when-spawner-event-is-cancelled.patch} | 0
...a-custom-authentication-servers-dow.patch} | 0
...-prefixes-using-Log4J-configuration.patch} | 0
...-Log4J-Configuration-Plugin-Loggers.patch} | 0
...t.patch => 0158-Add-PlayerJumpEvent.patch} | 0
...le-ServerboundKeepAlivePacket-async.patch} | 0
...t-protocol-version-and-virtual-host.patch} | 14 +-
...t-serverside-behavior-of-keepalives.patch} | 0
...Effects-only-to-players-who-can-see.patch} | 0
... => 0163-Add-PlayerArmorChangeEvent.patch} | 0
...om-being-processed-when-the-player-.patch} | 0
...5-Fix-MC-117075-TE-Unload-Lag-Spike.patch} | 8 +-
...-implementations-for-captured-block.patch} | 4 +-
...get-a-BlockState-without-a-snapshot.patch} | 0
...patch => 0168-AsyncTabCompleteEvent.patch} | 2 +-
...=> 0169-PlayerPickupExperienceEvent.patch} | 0
...-Ability-to-apply-mending-to-XP-API.patch} | 4 +-
...-PlayerNaturallySpawnCreaturesEvent.patch} | 16 +-
...Add-setPlayerProfile-API-for-Skulls.patch} | 0
...patch => 0173-PreCreatureSpawnEvent.patch} | 0
...> 0174-Fill-Profile-Property-Events.patch} | 0
...layerAdvancementCriterionGrantEvent.patch} | 0
...ch => 0176-Add-ArmorStand-Item-Meta.patch} | 0
...Extend-Player-Interact-cancellation.patch} | 0
... 0178-Tameable-getOwnerUniqueId-API.patch} | 0
...-crits-helps-mitigate-hacked-client.patch} | 0
...e-Explicit-Network-Manager-Flushing.patch} | 6 +-
...t-extended-PaperServerListPingEvent.patch} | 4 +-
...-PlayerProfile-in-AsyncPreLoginEven.patch} | 0
...=> 0183-Player.setPlayerProfile-API.patch} | 12 +-
...patch => 0184-getPlayerUniqueId-API.patch} | 2 +-
... 0185-Improved-Async-Task-Scheduler.patch} | 0
...e-legacy-ping-handler-more-reliable.patch} | 0
...erverListPingEvent-for-legacy-pings.patch} | 0
...8-Flag-to-disable-the-channel-limit.patch} | 4 +-
...-Add-openSign-method-to-HumanEntity.patch} | 0
...rable-sprint-interruption-on-attack.patch} | 0
...allowed-colored-signs-to-be-created.patch} | 0
...t.patch => 0192-EndermanEscapeEvent.patch} | 0
...h => 0193-Enderman.teleportRandomly.patch} | 0
...194-Block-Enderpearl-Travel-Exploit.patch} | 4 +-
...d.spawnParticle-API-and-add-Builder.patch} | 8 +-
...ted-Ice-from-loading-holding-chunks.patch} | 0
...h => 0197-EndermanAttackPlayerEvent.patch} | 0
...tch => 0198-WitchConsumePotionEvent.patch} | 0
...patch => 0199-WitchThrowPotionEvent.patch} | 0
...tem-entities-with-World.spawnEntity.patch} | 0
...patch => 0201-WitchReadyPotionEvent.patch} | 0
...202-ItemStack-getMaxItemUseDuration.patch} | 2 +-
...ement-EntityTeleportEndGatewayEvent.patch} | 0
...d-flag-on-cancel-of-Explosion-Event.patch} | 0
...ch => 0205-Fix-CraftEntity-hashCode.patch} | 0
...e-Alternative-LootPool-Luck-Formula.patch} | 0
...ls-when-failing-to-save-player-data.patch} | 2 +-
...-shield-blocking-delay-configurable.patch} | 0
...=> 0209-Improve-EntityShootBowEvent.patch} | 0
...patch => 0210-PlayerReadyArrowEvent.patch} | 0
...lement-EntityKnockbackByEntityEvent.patch} | 0
...patch => 0212-Expand-Explosions-API.patch} | 4 +-
...vingEntity-Hand-Raised-Item-Use-API.patch} | 0
...-API.patch => 0214-RangedEntity-API.patch} | 0
...o-disable-ender-dragon-legacy-check.patch} | 0
...-Implement-World.getEntity-UUID-API.patch} | 4 +-
...0217-InventoryCloseEvent-Reason-API.patch} | 22 +-
...tch => 0218-Vex-get-setSummoner-API.patch} | 0
...ventory-when-cancelling-PlayerInter.patch} | 0
...-to-keep-logging-IO-off-main-thread.patch} | 0
...more-information-to-Entity.toString.patch} | 4 +-
...tMagicNumbers.isSupportedApiVersion.patch} | 0
...ts.patch => 0223-EnderDragon-Events.patch} | 0
...atch => 0224-PlayerElytraBoostEvent.patch} | 0
...=> 0225-PlayerLaunchProjectileEvent.patch} | 0
...0226-Improve-BlockPosition-inlining.patch} | 0
...-armor-stands-from-doing-entity-loo.patch} | 4 +-
...-Vanished-players-don-t-have-rights.patch} | 2 +-
...llow-disabling-armour-stand-ticking.patch} | 0
...tch => 0230-SkeletonHorse-Additions.patch} | 4 +-
...n-t-call-getItemMeta-on-hasItemMeta.patch} | 2 +-
...2-Implement-Expanded-ArmorStand-API.patch} | 0
...vent.patch => 0233-AnvilDamageEvent.patch} | 0
...h => 0234-Add-hand-to-bucket-events.patch} | 0
...ent.patch => 0235-Add-TNTPrimeEvent.patch} | 2 +-
...d-make-tab-spam-limits-configurable.patch} | 0
...-Experience-should-save-as-Integers.patch} | 0
...emove-unnecessary-itemmeta-handling.patch} | 0
...es-option-to-debug-dupe-uuid-issues.patch} | 14 +-
...d-Early-Warning-Feature-to-WatchDog.patch} | 20 +-
...1-Use-ConcurrentHashMap-in-JsonList.patch} | 2 +-
...2-Use-a-Queue-for-Queueing-Commands.patch} | 12 +-
...le-Entities-from-a-chunk-without-sn.patch} | 6 +-
...timize-BlockPosition-helper-methods.patch} | 0
...efault-mob-spawn-range-and-water-an.patch} | 0
...tch => 0246-Slime-Pathfinder-Events.patch} | 0
...e-speed-for-water-flowing-over-lava.patch} | 0
...48-Optimize-CraftBlockData-Creation.patch} | 6 +-
...tch => 0249-Optimize-MappedRegistry.patch} | 0
...ch => 0250-Add-PhantomPreSpawnEvent.patch} | 0
....patch => 0251-Add-More-Creeper-API.patch} | 0
...=> 0252-Inventory-removeItemAnySlot.patch} | 0
...oadChunk-int-int-false-load-unconve.patch} | 4 +-
...ray-tracing-methods-to-LivingEntity.patch} | 4 +-
...-attack-cooldown-methods-for-Player.patch} | 4 +-
....patch => 0256-Improve-death-events.patch} | 12 +-
...w-chests-to-be-placed-with-NBT-data.patch} | 0
...I.patch => 0258-Mob-Pathfinding-API.patch} | 0
...for-CanPlaceOn-and-CanDestroy-NBT-v.patch} | 0
...nt-chunk-loading-from-Fluid-Flowing.patch} | 0
...nt-Mob-AI-Rules-from-Loading-Chunks.patch} | 0
...ning-from-loading-generating-chunks.patch} | 0
...t-furnace-cook-speed-multiplier-API.patch} | 0
...rseException-in-Entity-and-TE-names.patch} | 8 +-
...=> 0265-Honor-EntityAgeable.ageLock.patch} | 0
...le-connection-throttle-kick-message.patch} | 0
...> 0267-Hook-into-CB-plugin-rewrites.patch} | 0
....patch => 0268-PreSpawnerSpawnEvent.patch} | 0
...69-Add-LivingEntity-getTargetEntity.patch} | 0
...I.patch => 0270-Add-sun-related-API.patch} | 4 +-
...Turtle-API.patch => 0271-Turtle-API.patch} | 0
...ator-target-events-and-improve-impl.patch} | 4 +-
...her-worlds-for-shooter-of-projectil.patch} | 0
...PI.patch => 0274-Add-more-Witch-API.patch} | 0
...wned-for-Villager-Aggression-Config.patch} | 0
...vent-players-from-moving-into-unloa.patch} | 0
...7-Reset-players-airTicks-on-respawn.patch} | 4 +-
...after-profile-lookups-if-not-needed.patch} | 0
...r-Thread-Pool-and-Thread-Priorities.patch} | 2 +-
...=> 0280-Optimize-World-Time-Updates.patch} | 4 +-
...tore-custom-InventoryHolder-support.patch} | 0
...=> 0282-Use-Vanilla-Minecart-Speeds.patch} | 0
...0283-Fix-SpongeAbsortEvent-handling.patch} | 0
...-allow-digging-into-unloaded-chunks.patch} | 0
...ult-permission-message-configurable.patch} | 6 +-
...revent-rayTrace-from-loading-chunks.patch} | 0
...-Large-Packets-disconnecting-client.patch} | 4 +-
...ntity-dismount-during-teleportation.patch} | 14 +-
...I.patch => 0289-Add-more-Zombie-API.patch} | 0
...mits.patch => 0290-Book-Size-Limits.patch} | 0
...0291-Add-PlayerConnectionCloseEvent.patch} | 4 +-
...revent-Enderman-from-loading-chunks.patch} | 0
...replace-OfflinePlayer-getLastPlayed.patch} | 16 +-
...ehicle-tracking-issue-on-disconnect.patch} | 4 +-
...remove-from-being-called-on-Players.patch} | 4 +-
...ent.patch => 0296-BlockDestroyEvent.patch} | 4 +-
... => 0297-Async-command-map-building.patch} | 4 +-
...0298-Implement-Brigadier-Mojang-API.patch} | 0
...m-Shapeless-Custom-Crafting-Recipes.patch} | 0
... 0300-Limit-Client-Sign-length-more.patch} | 0
...onvertSigns-boolean-every-sign-save.patch} | 0
...Manager-and-add-advanced-packet-sup.patch} | 26 +-
...e-Oversized-Tile-Entities-in-chunks.patch} | 0
...t-tick-at-start-of-drowning-process.patch} | 0
...ggleEvent-when-whitelist-is-toggled.patch} | 2 +-
...=> 0306-Entity-getEntitySpawnReason.patch} | 12 +-
...ty-Metadata-for-all-tracked-players.patch} | 0
...tch => 0308-Fire-event-on-GS4-query.patch} | 0
...09-Implement-PlayerPostRespawnEvent.patch} | 2 +-
...for-pickupDelay-breaks-picking-up-i.patch} | 0
...ts.patch => 0311-Server-Tick-Events.patch} | 6 +-
...312-PlayerDeathEvent-getItemsToKeep.patch} | 6 +-
...Optimize-Captured-TileEntity-Lookup.patch} | 6 +-
...API.patch => 0314-Add-Heightmap-API.patch} | 2 +-
...> 0315-Mob-Spawner-API-Enhancements.patch} | 0
...-to-changed-postToMainThread-method.patch} | 0
...-item-frames-are-modified-MC-123450.patch} | 0
...0318-Implement-CraftBlockSoundGroup.patch} | 0
...e-Keep-Spawn-Loaded-range-per-world.patch} | 10 +-
...20-Allow-Saving-of-Oversized-Chunks.patch} | 4 +-
...21-Expose-the-internal-current-tick.patch} | 2 +-
...22-Fix-World-isChunkGenerated-calls.patch} | 60 +-
...te-location-if-we-failed-to-read-it.patch} | 0
...l-Spawned-mobs-towards-natural-spaw.patch} | 0
...urable-projectile-relative-velocity.patch} | 0
...h => 0326-offset-item-frame-ticking.patch} | 0
...-158900.patch => 0327-Fix-MC-158900.patch} | 2 +-
...event-consuming-the-wrong-itemstack.patch} | 0
...9-Dont-send-unnecessary-sign-update.patch} | 0
...-option-to-disable-pillager-patrols.patch} | 0
...331-Flat-bedrock-generator-settings.patch} | 2 +-
...k-loads-when-villagers-try-to-find-.patch} | 0
...656-Fix-Follow-Range-Initial-Target.patch} | 0
... 0334-Duplicate-UUID-Resolve-Option.patch} | 51 +-
...pers.patch => 0335-Optimize-Hoppers.patch} | 4 +-
...ayerDeathEvent-shouldDropExperience.patch} | 4 +-
...ading-chunks-checking-hive-position.patch} | 0
...hunks-from-Hoppers-and-other-things.patch} | 0
...ializing-mismatching-chunk-coordina.patch} | 8 +-
...imise-IEntityAccess-getPlayerByUUID.patch} | 8 +-
...341-Fix-items-not-falling-correctly.patch} | 4 +-
...patch => 0342-Lag-compensate-eating.patch} | 0
...ize-call-to-getFluid-for-explosions.patch} | 0
...-in-stack-not-having-effects-when-d.patch} | 0
...Add-effect-to-block-break-naturally.patch} | 0
...=> 0346-Entity-Activation-Range-2.0.patch} | 42 +-
...h => 0347-Increase-Light-Queue-Size.patch} | 2 +-
...6-Anti-Xray.patch => 0348-Anti-Xray.patch} | 70 +-
...ement-alternative-item-despawn-rate.patch} | 0
...=> 0350-Tracking-Range-Improvements.patch} | 4 +-
...-items-vanishing-through-end-portal.patch} | 4 +-
...ment-optional-per-player-mob-spawns.patch} | 58 +-
...et-gravity-in-void.-Fixes-MC-167279.patch} | 0
...-getChunkAt-calls-for-loaded-chunks.patch} | 8 +-
...0355-Add-debug-for-sync-chunk-loads.patch} | 24 +-
...6-Remove-garbage-Java-version-check.patch} | 0
...tch => 0357-Add-ThrownEggHatchEvent.patch} | 0
...p-API.patch => 0358-Entity-Jump-API.patch} | 0
...-to-nerf-pigmen-from-nether-portals.patch} | 6 +-
... => 0360-Make-the-GUI-graph-fancier.patch} | 0
...opper-searches-if-there-are-no-items.patch | 124 -
...61-add-hand-to-BlockMultiPlaceEvent.patch} | 0
...ipwire-hook-placement-before-update.patch} | 0
...o-allow-iron-golems-to-spawn-in-air.patch} | 0
...chance-of-villager-zombie-infection.patch} | 0
...tch => 0365-Optimise-Chunk-getFluid.patch} | 6 +-
...rbose-world-setting-to-false-by-def.patch} | 0
...Add-tick-times-API-and-mspt-command.patch} | 8 +-
...68-Expose-MinecraftServer-isRunning.patch} | 2 +-
...dd-Raw-Byte-ItemStack-Serialization.patch} | 0
...pawn-settings-and-per-player-option.patch} | 4 +-
...nections-shouldn-t-hold-up-shutdown.patch} | 2 +-
...ow-bees-to-load-chunks-for-beehives.patch} | 0
...PlayerChunkMap-adds-crashing-server.patch} | 10 +-
...tch => 0374-Don-t-tick-dead-players.patch} | 4 +-
...-Player-s-shouldn-t-be-able-to-move.patch} | 0
...timize-Collision-to-not-load-chunks.patch} | 4 +-
...ove-existing-players-to-world-spawn.patch} | 6 +-
...alSelector-Goal.Flag-Set-operations.patch} | 2 +-
...h => 0379-Improved-Watchdog-Support.patch} | 84 +-
....patch => 0380-Optimize-Pathfinding.patch} | 0
...1-Reduce-Either-Optional-allocation.patch} | 2 +-
...-memory-footprint-of-NBTTagCompound.patch} | 2 +-
...ent-opening-inventories-when-frozen.patch} | 6 +-
...entity-collision-code-if-not-needed.patch} | 0
...Implement-Player-Client-Options-API.patch} | 12 +-
...ayer-is-attempted-to-be-removed-fro.patch} | 6 +-
...-Broken-behavior-of-PlayerJoinEvent.patch} | 10 +-
...oad-Chunks-for-Login-Asynchronously.patch} | 18 +-
...awn-point-if-spawn-in-unloaded-worl.patch} | 4 +-
...layerAttackEntityCooldownResetEvent.patch} | 0
...-fire-BlockFade-on-worldgen-threads.patch} | 0
...tom-creative-and-insomniac-controls.patch} | 0
...-duplication-issues-and-teleport-is.patch} | 12 +-
...patch => 0394-Villager-Restocks-API.patch} | 0
...ickItem-Packet-and-kick-for-invalid.patch} | 0
...n.patch => 0396-Expose-game-version.patch} | 2 +-
...> 0397-Optimize-Voxel-Shape-Merging.patch} | 0
...per-thread-native-byte-buffer-cache.patch} | 0
....patch => 0399-misc-debugging-dumps.patch} | 10 +-
...0-Prevent-teleporting-dead-entities.patch} | 0
...traces-in-log-messages-crash-report.patch} | 10 +-
...atch => 0402-Implement-Mob-Goal-API.patch} | 2 +-
...=> 0403-Add-villager-reputation-API.patch} | 0
...maximum-exp-value-when-merging-orbs.patch} | 0
...tch => 0405-ExperienceOrbMergeEvent.patch} | 0
...-Fix-PotionEffect-ignores-icon-flag.patch} | 0
...brigadier-child-sorting-performance.patch} | 0
...API.patch => 0408-Potential-bed-API.patch} | 0
...ait-for-Async-Tasks-during-shutdown.patch} | 6 +-
...er-respects-game-and-entity-rules-f.patch} | 0
...nd-End-Portal-Frames-from-being-des.patch} | 8 +-
...leInt-allocations-from-light-engine.patch} | 2 +-
...location-of-Vec3D-by-entity-tracker.patch} | 8 +-
...> 0414-Ensure-safe-gateway-teleport.patch} | 0
...-for-console-having-all-permissions.patch} | 0
...rCloseEnoughForSpawning-to-use-dist.patch} | 96 +-
...ance-map-to-optimise-entity-tracker.patch} | 130 +-
...x-villager-trading-demand-MC-163962.patch} | 0
... => 0419-Maps-shouldn-t-load-chunks.patch} | 0
...okup-for-Treasure-Maps-Fixes-lag-fr.patch} | 0
...r-runTaskTimerAsynchronously-Plugin.patch} | 0
...ton-physics-inconsistency-MC-188840.patch} | 0
...uping.patch => 0423-Fix-sand-duping.patch} | 0
...sing-chunks-due-to-integer-overflow.patch} | 0
...desync-in-playerconnection-causing-.patch} | 0
...older-method-without-block-snapshot.patch} | 0
...API.patch => 0427-Improve-Arrow-API.patch} | 0
...mplement-PlayerRecipeBookClickEvent.patch} | 0
...-Hide-sync-chunk-writes-behind-flag.patch} | 0
...0-Add-permission-for-command-blocks.patch} | 0
...ure-Entity-AABB-s-are-never-invalid.patch} | 14 +-
...d-Difficulty-Remembering-Difficulty.patch} | 14 +-
...atch => 0433-Paper-dumpitem-command.patch} | 4 +-
...34-Don-t-allow-null-UUID-s-for-chat.patch} | 0
...Legacy-Component-serialization-size.patch} | 0
...Optimize-Bit-Operations-by-inlining.patch} | 0
...Plugin-Tickets-to-API-Chunk-Methods.patch} | 12 +-
...-incremental-chunk-and-player-saving.patch | 164 +
...e-operations-for-updating-light-dat.patch} | 2 +-
...440-Support-old-UUID-format-for-NBT.patch} | 0
...p-duplicated-GameProfile-Properties.patch} | 0
...vert-legacy-attributes-in-Item-Meta.patch} | 0
...Remove-some-streams-from-structures.patch} | 0
...rom-classes-related-villager-gossip.patch} | 0
...0445-Support-components-in-ItemMeta.patch} | 0
...rgetLivingEntityEvent-for-1.16-mobs.patch} | 0
...patch => 0447-Add-entity-liquid-API.patch} | 0
...date-itemstack-legacy-name-and-lore.patch} | 0
...wn-player-in-correct-world-on-login.patch} | 2 +-
...atch => 0450-Add-PrepareResultEvent.patch} | 0
...-incremental-chunk-and-player-saving.patch | 375 -
...-for-portal-on-world-gen-entity-add.patch} | 0
...e-NetworkManager-Exception-Handling.patch} | 0
...ncement-data-player-iteration-to-be.patch} | 0
...x-arrows-never-despawning-MC-125757.patch} | 0
...Vanilla-Command-permission-checking.patch} | 0
...-5989.patch => 0456-Fix-SPIGOT-5989.patch} | 2 +-
...-Bukkit-world-container-is-not-used.patch} | 0
...5885-Unable-to-disable-advancements.patch} | 0
...taPlayer-leak-due-from-quitting-ear.patch} | 2 +-
...eLighting-call-to-World-spigot-stri.patch} | 4 +-
...ix-some-rails-connecting-improperly.patch} | 2 +-
...stake-in-CB-NBT-int-deserialization.patch} | 0
...rver-load-chunks-from-newer-version.patch} | 4 +-
...support.patch => 0464-Brand-support.patch} | 4 +-
...patch => 0465-Add-setMaxPlayers-API.patch} | 4 +-
...PickupItemAnimation-to-LivingEntity.patch} | 0
...h => 0467-Don-t-require-FACING-data.patch} | 0
...eEvent-not-firing-for-all-use-cases.patch} | 6 +-
...PI.patch => 0469-Add-moon-phase-API.patch} | 0
...headless-pistons-from-being-created.patch} | 0
...ent.patch => 0471-Add-BellRingEvent.patch} | 0
...dd-zombie-targets-turtle-egg-config.patch} | 0
...patch => 0473-Buffer-joins-to-world.patch} | 4 +-
...-Eigencraft-redstone-implementation.patch} | 0
...s-not-working-in-some-kick-messages.patch} | 0
...reateEvent-needs-to-know-its-entity.patch} | 2 +-
...ch => 0477-Fix-CraftTeam-null-check.patch} | 0
...I.patch => 0478-Add-more-Evoker-API.patch} | 0
...Add-methods-to-get-translation-keys.patch} | 0
...te-HoverEvent-from-ItemStack-Entity.patch} | 0
...ch => 0481-Cache-block-data-strings.patch} | 4 +-
...rtation-and-cancel-velocity-if-tele.patch} | 4 +-
...l-open-container-api-to-HumanEntity.patch} | 0
...aFixerUpper-Rewrite-Rules-on-demand.patch} | 0
...-capture-to-capture-all-items-added.patch} | 4 +-
...-Counter-to-allow-plugins-to-use-va.patch} | 4 +-
...track-plugin-scoreboards-by-default.patch} | 0
...king.patch => 0488-Entity-isTicking.patch} | 4 +-
...non-whitelisted-player-when-white-l.patch} | 4 +-
...-Concurrency-issue-in-ShufflingList.patch} | 0
...eset-Ender-Crystals-on-Dragon-Spawn.patch} | 0
...-large-move-vectors-crashing-server.patch} | 2 +-
...atch => 0493-Optimise-getType-calls.patch} | 2 +-
....patch => 0494-Villager-resetOffers.patch} | 0
...nig-for-some-hot-IBlockData-methods.patch} | 16 +-
...ce-order-when-capturing-blockstates.patch} | 4 +-
...lockpos-allocation-from-pathfinding.patch} | 2 +-
...em-locations-dropped-from-campfires.patch} | 0
...rty-in-invalid-locations-SPIGOT-6086.patch | 18 -
...tch => 0499-Player-elytra-boost-API.patch} | 4 +-
...00-Fixed-TileEntityBell-memory-leak.patch} | 0
...ing-up-when-item-stack-is-empty-in-.patch} | 0
...Add-getOfflinePlayerIfCached-String.patch} | 2 +-
...ch => 0503-Add-ignore-discounts-API.patch} | 0
...Toggle-for-removing-existing-dragon.patch} | 0
...x-client-lag-on-advancement-loading.patch} | 0
...> 0506-Item-no-age-no-player-pickup.patch} | 0
...er-Remove-Streams-Optimized-collect.patch} | 0
...508-Beacon-API-custom-effect-ranges.patch} | 0
...tch => 0509-Add-API-for-quit-reason.patch} | 10 +-
...ng-Trader-spawn-rate-config-options.patch} | 0
...ch => 0511-Expose-world-spawn-angle.patch} | 2 +-
...patch => 0512-Add-Destroy-Speed-API.patch} | 0
...-spawnParticle-x-y-z-precision-loss.patch} | 4 +-
...14-Add-LivingEntity-clearActiveItem.patch} | 0
...=> 0515-Add-PlayerItemCooldownEvent.patch} | 0
...rove-performance-of-the-end-generat.patch} | 0
...PI.patch => 0517-More-lightning-API.patch} | 0
...should-not-bypass-cramming-gamerule.patch} | 4 +-
...-missing-default-perms-for-commands.patch} | 0
...h => 0520-Add-PlayerShearBlockEvent.patch} | 0
...ng-zombie-villager-discount-exploit.patch} | 0
....patch => 0522-Limit-recipe-packets.patch} | 0
...-CraftSound-backwards-compatibility.patch} | 0
...524-Player-Chunk-Load-Unload-Events.patch} | 4 +-
...5-Optimize-Dynamic-get-Missing-Keys.patch} | 0
...-Expose-LivingEntity-hurt-direction.patch} | 0
...OBSTRUCTED-reason-to-BedEnterResult.patch} | 0
...-invalid-ingredient-lists-in-Villag.patch} | 0
...rTradeEvent-and-PlayerPurchaseEvent.patch} | 0
...ch => 0530-Implement-TargetHitEvent.patch} | 0
... 0531-MC-4-Fix-item-position-desync.patch} | 10 +-
...532-Additional-Block-Material-API-s.patch} | 0
...tch => 0533-Fix-harming-potion-dupe.patch} | 0
...get-Material-from-Boats-and-Minecar.patch} | 0
....patch => 0535-Cache-burn-durations.patch} | 0
...ob-spawner-spawn-egg-transformation.patch} | 0
...ix-Not-a-string-Map-Conversion-spam.patch} | 0
...ment-PlayerFlowerPotManipulateEvent.patch} | 0
...event-not-being-called-in-adventure.patch} | 0
...h => 0540-Zombie-API-breaking-doors.patch} | 0
...541-Fix-nerfed-slime-when-splitting.patch} | 0
...=> 0542-Add-EntityLoadCrossbowEvent.patch} | 0
...ch => 0543-Guardian-beam-workaround.patch} | 0
...0544-Added-WorldGameRuleChangeEvent.patch} | 6 +-
...-Added-ServerResourcesReloadedEvent.patch} | 6 +-
...d-settings-for-mobs-picking-up-loot.patch} | 0
...mplemented-BlockFailedDispenseEvent.patch} | 0
...-Added-PlayerLecternPageChangeEvent.patch} | 0
...-Added-PlayerLoomPatternSelectEvent.patch} | 0
...nfigurable-door-breaking-difficulty.patch} | 0
...ty-commands-shall-not-be-dispatched.patch} | 0
...I-to-expose-exact-interaction-point.patch} | 0
...OIs.patch => 0553-Remove-stale-POIs.patch} | 4 +-
...h => 0554-Fix-villager-boat-exploit.patch} | 2 +-
...I.patch => 0555-Add-sendOpLevel-API.patch} | 6 +-
...try.patch => 0556-Add-PaperRegistry.patch} | 4 +-
...h => 0557-Add-StructuresLocateEvent.patch} | 2 +-
...-for-requiring-a-player-participant.patch} | 4 +-
...leHitEvent-call-when-fireballs-dead.patch} | 0
...nent-with-empty-text-instead-of-thr.patch} | 0
...561-Make-schedule-command-per-world.patch} | 0
...562-Configurable-max-leash-distance.patch} | 0
...563-Implement-BlockPreDispenseEvent.patch} | 0
...ng-of-PlayerChangeBeaconEffectEvent.patch} | 0
...e-for-always-placing-the-dragon-egg.patch} | 0
...-PlayerStonecutterRecipeSelectEvent.patch} | 0
...eash-variable-to-EntityUnleashEvent.patch} | 0
...shield-blocking-on-dimension-change.patch} | 4 +-
...atch => 0569-add-DragonEggFormEvent.patch} | 0
...Event.patch => 0570-EntityMoveEvent.patch} | 6 +-
...isable-pathfinding-updates-on-block.patch} | 6 +-
... 0572-Inline-shift-direction-fields.patch} | 0
...-adding-items-to-BlockDropItemEvent.patch} | 0
...inThreadExecutor-to-BukkitScheduler.patch} | 0
...entity-allow-attribute-registration.patch} | 0
...ix-dead-slime-setSize-invincibility.patch} | 0
...pes-should-return-an-immutable-list.patch} | 0
...port-for-hex-color-codes-in-console.patch} | 4 +-
...atch => 0579-Expose-Tracked-Players.patch} | 0
...0-Remove-streams-from-SensorNearest.patch} | 0
...er-exception-on-empty-JsonList-file.patch} | 0
...GUI.patch => 0582-Improve-ServerGUI.patch} | 0
...ure-plate-EntityInteractEvent-for-i.patch} | 0
...584-fix-converting-txt-to-json-file.patch} | 6 +-
...atch => 0585-Add-worldborder-events.patch} | 0
...=> 0586-added-PlayerNameEntityEvent.patch} | 0
...grindstones-from-overstacking-items.patch} | 0
...h => 0588-Add-recipe-to-cook-events.patch} | 0
...patch => 0589-Add-Block-isValidTool.patch} | 0
...using-signs-inside-spawn-protection.patch} | 0
....patch => 0591-Expand-world-key-API.patch} | 2 +-
...ternative-constructor-for-Rotations.patch} | 0
...y-API.patch => 0593-Item-Rarity-API.patch} | 0
...imer-for-Wandering-Traders-spawned-.patch} | 0
...py-TESign-isEditable-from-snapshots.patch} | 0
...d-item-when-player-has-disconnected.patch} | 2 +-
...elist-use-configurable-kick-message.patch} | 4 +-
...gnore-result-of-PlayerEditBookEvent.patch} | 0
...99-Entity-load-save-limit-per-chunk.patch} | 38 +-
...tch => 0600-Expose-protocol-version.patch} | 0
...ab-completions-for-brigadier-comman.patch} | 2 +-
...temConsumeEvent-cancelling-properly.patch} | 0
...patch => 0603-Add-bypass-host-check.patch} | 0
...0604-Set-area-affect-cloud-rotation.patch} | 0
...add-isDeeplySleeping-to-HumanEntity.patch} | 0
...add-consumeFuel-to-FurnaceBurnEvent.patch} | 0
...-set-drop-chance-to-EntityEquipment.patch} | 0
...ix-PigZombieAngerEvent-cancellation.patch} | 0
...-checkReach-check-for-Shulker-boxes.patch} | 0
...ix-PlayerItemHeldEvent-firing-twice.patch} | 0
... => 0611-Added-PlayerDeepSleepEvent.patch} | 0
...ld-API.patch => 0612-More-World-API.patch} | 4 +-
... 0613-Added-PlayerBedFailEnterEvent.patch} | 0
...-to-convert-between-Component-and-B.patch} | 2 +-
...n-acting-as-a-bed-respawn-from-the-.patch} | 2 +-
...acon-activation-deactivation-events.patch} | 0
...-RespawnFlags-to-PlayerRespawnEvent.patch} | 2 +-
...dd-Channel-initialization-listeners.patch} | 6 +-
...mands-if-tab-completion-is-disabled.patch} | 0
...> 0620-Add-more-WanderingTrader-API.patch} | 0
...dd-EntityBlockStorage-clearEntities.patch} | 0
...ssage-to-PlayerAdvancementDoneEvent.patch} | 0
...address-to-AsyncPlayerPreLoginEvent.patch} | 0
...close.patch => 0624-Inventory-close.patch} | 0
...reateEvent-players-and-end-platform.patch} | 4 +-
...-in-sunlight-API-for-Phantoms-and-S.patch} | 0
...=> 0627-Fix-CraftPotionBrewer-cache.patch} | 0
...atch => 0628-Add-basic-Datapack-API.patch} | 2 +-
...ment-variable-to-disable-server-gui.patch} | 0
...itions-to-PlayerGameModeChangeEvent.patch} | 12 +-
... => 0631-ItemStack-repair-check-API.patch} | 0
....patch => 0632-More-Enchantment-API.patch} | 0
...ve-range-check-for-block-placing-up.patch} | 0
...-and-optimise-world-force-upgrading.patch} | 8 +-
...PI.patch => 0635-Add-Mob-lookAt-API.patch} | 0
...0636-Add-Unix-domain-socket-support.patch} | 6 +-
... => 0637-Add-EntityInsideBlockEvent.patch} | 0
...38-Attributes-API-for-item-defaults.patch} | 0
...ause-to-Weather-ThunderChangeEvents.patch} | 14 +-
...patch => 0640-More-Lidded-Block-API.patch} | 0
...41-Limit-item-frame-cursors-on-maps.patch} | 0
... => 0642-Add-PlayerKickEvent-causes.patch} | 12 +-
...0643-Add-PufferFishStateChangeEvent.patch} | 0
...erBucketEmptyEvent-result-itemstack.patch} | 0
...tedContainer-instead-of-ThreadingDe.patch} | 2 +-
...-to-fix-items-merging-through-walls.patch} | 0
...h => 0647-Add-BellRevealRaiderEvent.patch} | 0
... 0648-Fix-invulnerable-end-crystals.patch} | 0
...49-Add-ElderGuardianAppearanceEvent.patch} | 0
...0650-Fix-dangerous-end-portal-logic.patch} | 6 +-
...-Biome-Mob-Lookups-for-Mob-Spawning.patch} | 0
...-Make-item-validations-configurable.patch} | 0
...patch => 0653-Line-Of-Sight-Changes.patch} | 0
... => 0654-add-per-world-spawn-limits.patch} | 2 +-
...plashEvent-for-water-splash-potions.patch} | 0
... => 0656-Add-more-LimitedRegion-API.patch} | 0
...layerDropItemEvent-using-wrong-item.patch} | 4 +-
...=> 0658-Missing-Entity-Behavior-API.patch} | 4 +-
...ect-for-book-edit-is-called-on-main.patch} | 0
...of-Block-applyBoneMeal-always-being.patch} | 0
...tChunkIfLoadedImmediately-in-places.patch} | 6 +-
...rom-signs-not-firing-command-events.patch} | 0
...ch => 0663-Adds-PlayerArmSwingEvent.patch} | 0
...-event-leave-message-not-being-sent.patch} | 6 +-
...-for-mobs-immune-to-default-effects.patch} | 4 +-
...correct-message-for-outdated-client.patch} | 0
...-t-apply-cramming-damage-to-players.patch} | 4 +-
...d-timings-for-sensors-and-behaviors.patch} | 0
...-bunch-of-missing-forceDrop-toggles.patch} | 0
...inger-API.patch => 0670-Stinger-API.patch} | 0
...cy-issue-with-empty-map-items-in-CB.patch} | 0
... => 0672-Add-System.out-err-catcher.patch} | 2 +-
... => 0673-Fix-test-not-bootstrapping.patch} | 0
...-to-contain-the-source-jars-in-stac.patch} | 0
...-Improve-boat-collision-performance.patch} | 2 +-
...AFK-kick-while-watching-end-credits.patch} | 0
...iting-of-comments-to-server.propert.patch} | 0
...tch => 0678-Add-PlayerSetSpawnEvent.patch} | 14 +-
...rs-respect-inventory-max-stack-size.patch} | 0
...ize-entity-tracker-passenger-checks.patch} | 0
...-option-for-Piglins-guarding-chests.patch} | 0
...=> 0682-Added-EntityDamageItemEvent.patch} | 0
...timize-indirect-passenger-iteration.patch} | 7 +-
...osition-losing-precision-millions-o.patch} | 0
...em-frame-map-cursor-update-interval.patch} | 0
...Make-EntityUnleashEvent-cancellable.patch} | 0
...687-Clear-bucket-NBT-after-dispense.patch} | 0
...arget-without-changing-other-things.patch} | 0
...ch => 0689-Add-BlockBreakBlockEvent.patch} | 0
...revent-NBT-copy-in-smithing-recipes.patch} | 0
...patch => 0691-More-CommandBlock-API.patch} | 0
...-missing-team-sidebar-display-slots.patch} | 0
...0693-Add-back-EntityPortalExitEvent.patch} | 6 +-
...-find-targets-for-lightning-strikes.patch} | 10 +-
... 0695-Get-entity-default-attributes.patch} | 0
...d-API.patch => 0696-Left-handed-API.patch} | 0
...=> 0697-Add-advancement-display-API.patch} | 0
...8-Add-ItemFactory-getMonsterEgg-API.patch} | 0
...tch => 0699-Add-critical-damage-API.patch} | 0
...0700-Fix-issues-with-mob-conversion.patch} | 0
...ollidable-methods-to-various-places.patch} | 0
...-ram-API.patch => 0702-Goat-ram-API.patch} | 0
...dd-API-for-resetting-a-single-score.patch} | 0
...4-Add-Raw-Byte-Entity-Serialization.patch} | 4 +-
...05-Vanilla-command-permission-fixes.patch} | 0
...logic-for-inventories-on-chunk-unlo.patch} | 8 +-
...07-Fix-GameProfileCache-concurrency.patch} | 4 +-
...g-when-the-async-catcher-is-tripped.patch} | 4 +-
...per-mobcaps-and-paper-playermobcaps.patch} | 10 +-
...tize-ResourceLocation-error-logging.patch} | 0
...rolled-flushing-for-network-manager.patch} | 20 +-
...=> 0712-Optimise-general-POI-access.patch} | 19 +-
... 0713-Optimise-chunk-tick-iteration.patch} | 42 +-
...> 0714-Execute-chunk-tasks-mid-tick.patch} | 28 +-
...ulate-regionfile-header-if-it-is-co.patch} | 10 +-
...ementation-for-blockstate-state-loo.patch} | 0
...-more-information-in-watchdog-dumps.patch} | 32 +-
...lly-inline-methods-in-BlockPosition.patch} | 2 +-
... 0719-Distance-manager-tick-timings.patch} | 28 +-
...ler-threads-according-to-the-plugin.patch} | 2 +-
...-getChunkAt-has-inlined-logic-for-l.patch} | 6 +-
...h => 0722-Add-packet-limiter-config.patch} | 8 +-
...Stem-registry-when-loading-default-.patch} | 2 +-
...our-chunk-data-off-disk-when-conver.patch} | 2 +-
...sh-calls-for-entity-tracker-packets.patch} | 6 +-
...-lookup-fluid-state-when-raytracing.patch} | 2 +-
...atch => 0727-Time-scoreboard-search.patch} | 2 +-
...packets-for-hard-colliding-entities.patch} | 0
...9-Do-not-run-raytrace-logic-for-AIR.patch} | 0
...rimise-map-impl-for-tracked-players.patch} | 12 +-
...imise-BlockSoil-nearby-water-lookup.patch} | 0
... 0732-Optimise-random-block-ticking.patch} | 14 +-
.../server/0733-Add-more-async-catchers.patch | 44 -
...3-Optimise-non-flush-packet-sending.patch} | 6 +-
...0734-Optimise-nearby-player-lookups.patch} | 59 +-
...0735-Remove-streams-for-villager-AI.patch} | 2 +-
...city-compression-and-cipher-natives.patch} | 6 +-
...hread-worker-count-for-low-core-cou.patch} | 0
...ifications-to-critical-entity-state.patch} | 40 +-
...ix-Bukkit-NamespacedKey-shenanigans.patch} | 0
...ntory-not-closing-on-entity-removal.patch} | 4 +-
...rement-before-suggesting-root-nodes.patch} | 0
...ServerboundCommandSuggestionPacket-.patch} | 0
...nColor-on-tropical-fish-bucket-meta.patch} | 0
...=> 0744-Ensure-valid-vehicle-status.patch} | 4 +-
...ftlocked-end-exit-portal-generation.patch} | 0
...r-causing-a-crash-when-trying-to-ge.patch} | 0
...-t-log-debug-logging-being-disabled.patch} | 0
...ous-menus-with-empty-level-accesses.patch} | 0
...h => 0749-Preserve-overstacked-loot.patch} | 0
...ate-head-rotation-in-missing-places.patch} | 6 +-
...unintended-light-block-manipulation.patch} | 0
...0752-Fix-CraftCriteria-defaults-map.patch} | 0
...Fix-upstreams-block-state-factories.patch} | 0
...ion-for-logging-player-ip-addresses.patch} | 2 +-
... => 0755-Configurable-feature-seeds.patch} | 6 +-
...pper-didnt-account-for-entity-sende.patch} | 0
... 0757-Add-root-admin-user-detection.patch} | 2 +-
...ays-allow-item-changing-in-Fireball.patch} | 0
.../0758-Optimise-WorldServer-notify.patch | 337 -
...t-attempt-to-teleport-dead-entities.patch} | 4 +-
...ive-velocity-through-repeated-crits.patch} | 0
...e-code-using-deprecated-for-removal.patch} | 0
...ochunk-light-sources-unless-it-is-m.patch} | 4 +-
...emoving-recipes-from-RecipeIterator.patch} | 0
...versized-item-data-in-equipment-and.patch} | 0
...e-unnecessary-itemmeta-from-clients.patch} | 0
...ier-changing-growth-for-other-crops.patch} | 0
...OpenersCounter-openCount-from-going.patch} | 0
...0768-Add-PlayerItemFrameChangeEvent.patch} | 0
...> 0769-Add-player-health-update-API.patch} | 6 +-
...tch => 0770-Optimize-HashMapPalette.patch} | 0
...low-delegation-to-vanilla-chunk-gen.patch} | 2 +-
...ingle-and-multi-AABB-VoxelShapes-an.patch} | 31 +-
...n-checking-in-player-move-packet-ha.patch} | 2 +-
...-isSectionEmpty-int-and-optimize-Pa.patch} | 2 +-
...te-Log4j.patch => 0775-Update-Log4j.patch} | 0
...patch => 0776-Add-more-Campfire-API.patch} | 0
...data-to-disk-if-it-serializes-witho.patch} | 8 +-
...78-Fix-tripwire-state-inconsistency.patch} | 0
...uid-logging-on-Block-breakNaturally.patch} | 0
...ard-CraftEntity-in-teleport-command.patch} | 6 +-
... => 0781-Improve-scoreboard-entries.patch} | 0
...ch => 0782-Entity-powdered-snow-API.patch} | 0
...0783-Add-API-for-item-entity-health.patch} | 0
...-type-tags-suggestions-in-selectors.patch} | 0
...ax-block-light-for-monster-spawning.patch} | 0
...pistons-and-BlockPistonRetractEvent.patch} | 0
...plifiers-greater-than-127-correctly.patch} | 0
...-canSmelt-methods-to-FurnaceInvento.patch} | 0
...=> 0789-Fix-bees-aging-inside-hives.patch} | 0
...le-API.patch => 0790-Bucketable-API.patch} | 0
...layer-world-in-endPortalSoundRadius.patch} | 0
...es.patch => 0792-Validate-usernames.patch} | 2 +-
...ing-configs-with-more-long-comments.patch} | 0
...er-animal-spawn-height-configurable.patch} | 0
...anilla-BiomeProvider-from-WorldInfo.patch} | 6 +-
...ion-for-worlds-affected-by-time-cmd.patch} | 0
...load-to-PersistentDataContainer-has.patch} | 0
...8-Multiple-Entries-with-Scoreboards.patch} | 0
...799-Reset-placed-block-on-exception.patch} | 0
...configurable-height-for-slime-spawn.patch} | 0
...ostname-to-AsyncPlayerPreLoginEvent.patch} | 0
...0802-Fix-xp-reward-for-baby-zombies.patch} | 0
... 0803-Kick-on-main-for-illegal-chat.patch} | 0
...lti-Block-Change-API-Implementation.patch} | 4 +-
...ent.patch => 0805-Fix-NotePlayEvent.patch} | 0
....patch => 0806-Freeze-Tick-Lock-API.patch} | 8 +-
...lphin-API.patch => 0807-Dolphin-API.patch} | 0
...h => 0808-More-PotionEffectType-API.patch} | 0
...for-StructureTemplate.Pallete-cache.patch} | 0
...command-sender-which-forwards-feedb.patch} | 2 +-
...0811-Add-config-for-stronghold-seed.patch} | 2 +-
...h => 0812-Implement-regenerateChunk.patch} | 2 +-
...lled-powdered-snow-bucket-placement.patch} | 0
...ate-calls-to-CraftServer-getSpawnLi.patch} | 2 +-
...gs.patch => 0815-Add-GameEvent-tags.patch} | 2 +-
...ks-fairly-for-worlds-while-waiting-.patch} | 6 +-
...tch => 0817-Furnace-RecipesUsed-API.patch} | 0
...gurable-sculk-sensor-listener-range.patch} | 0
...d-missing-block-data-mins-and-maxes.patch} | 0
...fault-CustomSpawners-in-custom-worl.patch} | 2 +-
...-worldlist-before-initing-the-world.patch} | 6 +-
... => 0822-Fix-Entity-Position-Desync.patch} | 0
...s.patch => 0823-Custom-Potion-Mixes.patch} | 6 +-
...0824-Fix-Fluid-tags-isTagged-method.patch} | 0
...25-Force-close-world-loading-screen.patch} | 2 +-
...826-Fix-falling-block-spawn-methods.patch} | 6 +-
...Expose-furnace-minecart-push-values.patch} | 0
...ojectileHitEvent-for-piercing-arrow.patch} | 0
...I.patch => 0829-More-Projectile-API.patch} | 2 +-
...x-swamp-hut-cat-generation-deadlock.patch} | 0
...le-movement-from-players-while-tele.patch} | 0
...0832-Implement-getComputedBiome-API.patch} | 0
...> 0833-Make-some-itemstacks-nonnull.patch} | 0
...alid-GameProfiles-on-skull-blocks-i.patch} | 0
...835-Implement-enchantWithLevels-API.patch} | 0
...h => 0836-Fix-saving-in-unloadWorld.patch} | 2 +-
...h => 0837-Buffer-OOB-setBlock-calls.patch} | 0
... 0838-Add-TameableDeathMessageEvent.patch} | 0
...a-for-EntityChangeBlockEvent-when-s.patch} | 0
...bles-running-when-mob-loot-gamerule.patch} | 4 +-
...ssenger-world-matches-ridden-entity.patch} | 4 +-
...rd-against-invalid-entity-positions.patch} | 8 +-
...s.patch => 0843-cache-resource-keys.patch} | 0
...ange-the-podium-for-the-EnderDragon.patch} | 6 +-
...erriding-a-block-entity-during-worl.patch} | 0
...eGrowEvent-species-for-RED_MUSHROOM.patch} | 0
...t-tile-entity-copies-loading-chunks.patch} | 0
...ead-of-display-name-in-PlayerList-g.patch} | 2 +-
...s-not-spawning-outside-slime-chunks.patch} | 0
...-ServerLevel-for-gamerule-callbacks.patch} | 8 +-
...ing-amount-to-PlayerItemDamageEvent.patch} | 0
...> 0852-WorldCreator-keepSpawnLoaded.patch} | 4 +-
...-NPE-for-BlockDataMeta-getBlockData.patch} | 0
...destroyed-trigger-in-the-correct-pl.patch} | 0
...Event-and-CollarColorable-interface.patch} | 0
...CauldronLevelChange-on-initial-fill.patch} | 0
...snow-cauldrons-not-turning-to-water.patch} | 0
...> 0858-Add-PlayerStopUsingItemEvent.patch} | 0
...59-FallingBlock-auto-expire-setting.patch} | 0
...rs.patch => 0860-Don-t-tick-markers.patch} | 6 +-
...-not-accept-invalid-client-settings.patch} | 0
...0862-Add-support-for-Proxy-Protocol.patch} | 0
...x-OfflinePlayer-getBedSpawnLocation.patch} | 0
...tory-for-smokers-and-blast-furnaces.patch} | 0
... 0865-Sanitize-Sent-BlockEntity-NBT.patch} | 0
...ntity-loading-causing-async-lookups.patch} | 4 +-
...-selector-resolving-in-books-by-def.patch} | 0
...-on-world-create-while-being-ticked.patch} | 10 +-
...ate-Current-redstone-implementation.patch} | 10 +-
...70-Dont-resent-entity-on-art-update.patch} | 0
...atch => 0871-Add-missing-spawn-eggs.patch} | 2 +-
... => 0872-Add-WardenAngerChangeEvent.patch} | 0
...strict-advancement-dimension-checks.patch} | 4 +-
...tant-BlockStateListPopulator-method.patch} | 0
...I.patch => 0875-Nameable-Banner-API.patch} | 0
...roadcast-messages-to-command-blocks.patch} | 2 +-
...pty-items-from-being-added-to-world.patch} | 4 +-
...hPotion-and-LingeringPotion-spawnin.patch} | 0
...nent-in-resource-pack-rejection-mes.patch} | 0
...atch => 0880-Add-Player-getFishHook.patch} | 0
...chunk-for-dynamic-game-event-listen.patch} | 0
...s-missing-EntityDropItemEvent-calls.patch} | 4 +-
...debug-information-to-chat-packet-er.patch} | 0
...PE.patch => 0884-Fix-Bee-flower-NPE.patch} | 0
...-not-using-commands.spam-exclusions.patch} | 0
...on-to-Tadpoles-spawned-by-Frogspawn.patch} | 0
...API.patch => 0887-More-Teleport-API.patch} | 2 +-
... => 0888-Add-EntityPortalReadyEvent.patch} | 6 +-
...level-random-in-entity-constructors.patch} | 0
...k-entities-after-destroy-prediction.patch} | 0
...on-plugins-accessing-faraway-chunks.patch} | 12 +-
...tom-Chat-Completion-Suggestions-API.patch} | 2 +-
...=> 0893-Add-missing-BlockFadeEvents.patch} | 0
...ion-API.patch => 0894-Collision-API.patch} | 0
...nd-message-for-brigadier-syntax-exc.patch} | 0
...ocess-cancelling-and-command-changi.patch} | 0
...-invalid-signature-login-stacktrace.patch} | 0
...-to-PlayerConnection-internalTelepo.patch} | 0
...API.patch => 0899-Block-Ticking-API.patch} | 0
...-Add-Velocity-IP-Forwarding-Support.patch} | 2 +-
...andom-in-ServerLoginPacketListenerI.patch} | 0
...902-Add-NamespacedKey-biome-methods.patch} | 0
...x-plugin-loggers-on-server-shutdown.patch} | 4 +-
...und-for-client-lag-spikes-MC-162253.patch} | 6 +-
...ok-changes-from-crashing-the-server.patch} | 0
...estroyerIdentity-to-sendBlockDamage.patch} | 2 +-
...t-killed-statuses-should-be-false-f.patch} | 0
...tityChangeBlockEvent-in-more-places.patch} | 0
...> 0909-Missing-eating-regain-reason.patch} | 0
....patch => 0910-Missing-effect-cause.patch} | 0
...serialization-deserialization-for-P.patch} | 0
...rameter-to-ProjectileSource-launchP.patch} | 0
...3-Call-BlockPhysicsEvent-more-often.patch} | 0
...0914-Configurable-chat-thread-limit.patch} | 4 +-
...of-WorldCreator-keepSpawnLoaded-ret.patch} | 0
...re-player-sending-on-dimension-chan.patch} | 4 +-
942 files changed, 20131 insertions(+), 2697 deletions(-)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0014-ChunkMapDistance-CME.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0015-Do-not-copy-visible-chunks.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0016-Chunk-debug-command.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0017-Make-CallbackExecutor-strict-again.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0019-Asynchronous-chunk-IO-and-loading.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0020-Implement-Chunk-Priority-Urgency-System-for-Chunks.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0048-Per-Player-View-Distance-API-placeholders.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0137-Make-targetSize-more-aggressive-in-the-chunk-unload-.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0324-Fix-CraftServer-isPrimaryThread-and-MinecraftServer-.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0355-Fix-Light-Command.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0393-Optimise-ArraySetSorted-removeIf.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0397-Fix-Chunk-Post-Processing-deadlock-risk.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0429-Optimize-ServerLevels-chunk-level-checking-methods.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0482-Improve-Chunk-Status-Transition-Speed.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0720-Do-not-allow-the-server-to-unload-chunks-at-request-.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0722-Correctly-handle-recursion-for-chunkholder-updates.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0724-Fix-chunks-refusing-to-unload-at-low-TPS.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0725-Do-not-allow-ticket-level-changes-while-unloading-pl.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0726-Do-not-allow-ticket-level-changes-when-updating-chun.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0729-Prevent-unload-calls-removing-tickets-for-sync-loads.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0734-Rewrite-entity-bounding-box-lookup-calls.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0754-Allow-removal-addition-of-entities-to-entity-ticklis.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0763-Do-not-process-entity-loads-in-CraftChunk-getEntitie.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0801-Actually-unload-POI-data.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0845-Replace-ticket-level-propagator.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0853-Replace-player-chunk-loader-system.patch (100%)
rename patches/{server => removed/1.19.2-legacy-chunksystem}/0859-Fix-save-problems-on-shutdown.patch (100%)
rename patches/server/{0760-Rewrite-dataconverter-system.patch => 0013-Rewrite-dataconverter-system.patch} (99%)
rename patches/server/{0788-Rewrite-the-light-engine.patch => 0014-Starlight.patch} (94%)
rename patches/server/{0013-Not-implemeneted.patch => 0015-Not-implemeneted.patch} (100%)
create mode 100644 patches/server/0016-Rewrite-chunk-system.patch
rename patches/server/{0021-Add-command-line-option-to-load-extra-plugin-jars-no.patch => 0017-Add-command-line-option-to-load-extra-plugin-jars-no.patch} (97%)
rename patches/server/{0022-Configurable-cactus-bamboo-and-reed-growth-heights.patch => 0018-Configurable-cactus-bamboo-and-reed-growth-heights.patch} (100%)
delete mode 100644 patches/server/0018-Delay-Chunk-Unloads-based-on-Player-Movement.patch
rename patches/server/{0023-Configurable-baby-zombie-movement-speed.patch => 0019-Configurable-baby-zombie-movement-speed.patch} (100%)
rename patches/server/{0024-Configurable-fishing-time-ranges.patch => 0020-Configurable-fishing-time-ranges.patch} (100%)
rename patches/server/{0025-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch => 0021-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch} (100%)
rename patches/server/{0026-Add-configurable-despawn-distances-for-living-entiti.patch => 0022-Add-configurable-despawn-distances-for-living-entiti.patch} (100%)
rename patches/server/{0027-Allow-for-toggling-of-spawn-chunks.patch => 0023-Allow-for-toggling-of-spawn-chunks.patch} (91%)
rename patches/server/{0028-Drop-falling-block-and-tnt-entities-at-the-specified.patch => 0024-Drop-falling-block-and-tnt-entities-at-the-specified.patch} (100%)
rename patches/server/{0029-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch => 0025-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch} (88%)
rename patches/server/{0030-Implement-Paper-VersionChecker.patch => 0026-Implement-Paper-VersionChecker.patch} (100%)
rename patches/server/{0031-Add-version-history-to-version-command.patch => 0027-Add-version-history-to-version-command.patch} (98%)
rename patches/server/{0032-Player-affects-spawning-API.patch => 0028-Player-affects-spawning-API.patch} (95%)
rename patches/server/{0033-Further-improve-server-tick-loop.patch => 0029-Further-improve-server-tick-loop.patch} (91%)
rename patches/server/{0034-Only-refresh-abilities-if-needed.patch => 0030-Only-refresh-abilities-if-needed.patch} (87%)
rename patches/server/{0035-Entity-Origin-API.patch => 0031-Entity-Origin-API.patch} (94%)
rename patches/server/{0036-Prevent-tile-entity-and-entity-crashes.patch => 0032-Prevent-tile-entity-and-entity-crashes.patch} (92%)
rename patches/server/{0037-Configurable-top-of-nether-void-damage.patch => 0033-Configurable-top-of-nether-void-damage.patch} (87%)
rename patches/server/{0038-Check-online-mode-before-converting-and-renaming-pla.patch => 0034-Check-online-mode-before-converting-and-renaming-pla.patch} (92%)
rename patches/server/{0039-Always-tick-falling-blocks.patch => 0035-Always-tick-falling-blocks.patch} (90%)
rename patches/server/{0040-Configurable-end-credits.patch => 0036-Configurable-end-credits.patch} (85%)
rename patches/server/{0041-Fix-lag-from-explosions-processing-dead-entities.patch => 0037-Fix-lag-from-explosions-processing-dead-entities.patch} (100%)
rename patches/server/{0042-Optimize-explosions.patch => 0038-Optimize-explosions.patch} (95%)
rename patches/server/{0043-Disable-explosion-knockback.patch => 0039-Disable-explosion-knockback.patch} (100%)
rename patches/server/{0044-Disable-thunder.patch => 0040-Disable-thunder.patch} (88%)
rename patches/server/{0045-Disable-ice-and-snow.patch => 0041-Disable-ice-and-snow.patch} (85%)
rename patches/server/{0046-Configurable-mob-spawner-tick-rate.patch => 0042-Configurable-mob-spawner-tick-rate.patch} (100%)
rename patches/server/{0047-Implement-PlayerLocaleChangeEvent.patch => 0043-Implement-PlayerLocaleChangeEvent.patch} (85%)
rename patches/server/{0049-Add-BeaconEffectEvent.patch => 0044-Add-BeaconEffectEvent.patch} (100%)
rename patches/server/{0050-Configurable-container-update-tick-rate.patch => 0045-Configurable-container-update-tick-rate.patch} (84%)
rename patches/server/{0051-Use-UserCache-for-player-heads.patch => 0046-Use-UserCache-for-player-heads.patch} (100%)
rename patches/server/{0052-Disable-spigot-tick-limiters.patch => 0047-Disable-spigot-tick-limiters.patch} (88%)
rename patches/server/{0053-Add-PlayerInitialSpawnEvent.patch => 0048-Add-PlayerInitialSpawnEvent.patch} (95%)
rename patches/server/{0054-Configurable-Disabling-Cat-Chest-Detection.patch => 0049-Configurable-Disabling-Cat-Chest-Detection.patch} (100%)
rename patches/server/{0055-Ensure-commands-are-not-ran-async.patch => 0050-Ensure-commands-are-not-ran-async.patch} (96%)
rename patches/server/{0056-All-chunks-are-slime-spawn-chunks-toggle.patch => 0051-All-chunks-are-slime-spawn-chunks-toggle.patch} (93%)
rename patches/server/{0057-Expose-server-CommandMap.patch => 0052-Expose-server-CommandMap.patch} (87%)
rename patches/server/{0058-Be-a-bit-more-informative-in-maxHealth-exception.patch => 0053-Be-a-bit-more-informative-in-maxHealth-exception.patch} (100%)
rename patches/server/{0059-Ensure-inv-drag-is-in-bounds.patch => 0054-Ensure-inv-drag-is-in-bounds.patch} (100%)
rename patches/server/{0060-Player-Tab-List-and-Title-APIs.patch => 0055-Player-Tab-List-and-Title-APIs.patch} (98%)
rename patches/server/{0061-Add-configurable-portal-search-radius.patch => 0056-Add-configurable-portal-search-radius.patch} (94%)
rename patches/server/{0062-Add-velocity-warnings.patch => 0057-Add-velocity-warnings.patch} (91%)
rename patches/server/{0063-Configurable-inter-world-teleportation-safety.patch => 0058-Configurable-inter-world-teleportation-safety.patch} (91%)
rename patches/server/{0064-Add-exception-reporting-event.patch => 0059-Add-exception-reporting-event.patch} (91%)
rename patches/server/{0065-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch => 0060-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch} (100%)
rename patches/server/{0066-Disable-Scoreboards-for-non-players-by-default.patch => 0061-Disable-Scoreboards-for-non-players-by-default.patch} (92%)
rename patches/server/{0067-Add-methods-for-working-with-arrows-stuck-in-living-.patch => 0062-Add-methods-for-working-with-arrows-stuck-in-living-.patch} (100%)
rename patches/server/{0068-Chunk-Save-Reattempt.patch => 0063-Chunk-Save-Reattempt.patch} (85%)
rename patches/server/{0069-Complete-resource-pack-API.patch => 0064-Complete-resource-pack-API.patch} (96%)
rename patches/server/{0070-Default-loading-permissions.yml-before-plugins.patch => 0065-Default-loading-permissions.yml-before-plugins.patch} (95%)
rename patches/server/{0071-Allow-Reloading-of-Custom-Permissions.patch => 0066-Allow-Reloading-of-Custom-Permissions.patch} (94%)
rename patches/server/{0072-Remove-Metadata-on-reload.patch => 0067-Remove-Metadata-on-reload.patch} (93%)
rename patches/server/{0073-Handle-Item-Meta-Inconsistencies.patch => 0068-Handle-Item-Meta-Inconsistencies.patch} (100%)
rename patches/server/{0074-Configurable-Non-Player-Arrow-Despawn-Rate.patch => 0069-Configurable-Non-Player-Arrow-Despawn-Rate.patch} (100%)
rename patches/server/{0075-Add-World-Util-Methods.patch => 0070-Add-World-Util-Methods.patch} (92%)
rename patches/server/{0076-Custom-replacement-for-eaten-items.patch => 0071-Custom-replacement-for-eaten-items.patch} (100%)
rename patches/server/{0077-handle-NaN-health-absorb-values-and-repair-bad-data.patch => 0072-handle-NaN-health-absorb-values-and-repair-bad-data.patch} (94%)
rename patches/server/{0078-Use-a-Shared-Random-for-Entities.patch => 0073-Use-a-Shared-Random-for-Entities.patch} (96%)
rename patches/server/{0079-Configurable-spawn-chances-for-skeleton-horses.patch => 0074-Configurable-spawn-chances-for-skeleton-horses.patch} (90%)
rename patches/server/{0080-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch => 0075-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch} (88%)
rename patches/server/{0081-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch => 0076-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch} (91%)
rename patches/server/{0082-Entity-AddTo-RemoveFrom-World-Events.patch => 0077-Entity-AddTo-RemoveFrom-World-Events.patch} (83%)
rename patches/server/{0083-Configurable-Chunk-Inhabited-Time.patch => 0078-Configurable-Chunk-Inhabited-Time.patch} (88%)
rename patches/server/{0084-EntityPathfindEvent.patch => 0079-EntityPathfindEvent.patch} (100%)
rename patches/server/{0085-Sanitise-RegionFileCache-and-make-configurable.patch => 0080-Sanitise-RegionFileCache-and-make-configurable.patch} (93%)
rename patches/server/{0086-Do-not-load-chunks-for-Pathfinding.patch => 0081-Do-not-load-chunks-for-Pathfinding.patch} (100%)
rename patches/server/{0087-Add-PlayerUseUnknownEntityEvent.patch => 0082-Add-PlayerUseUnknownEntityEvent.patch} (100%)
rename patches/server/{0088-Configurable-Grass-Spread-Tick-Rate.patch => 0083-Configurable-Grass-Spread-Tick-Rate.patch} (100%)
rename patches/server/{0089-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch => 0084-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch} (82%)
rename patches/server/{0090-Optimize-DataBits.patch => 0085-Optimize-DataBits.patch} (100%)
rename patches/server/{0091-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch => 0086-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch} (100%)
rename patches/server/{0092-Configurable-Player-Collision.patch => 0087-Configurable-Player-Collision.patch} (97%)
rename patches/server/{0093-Add-handshake-event-to-allow-plugins-to-handle-clien.patch => 0088-Add-handshake-event-to-allow-plugins-to-handle-clien.patch} (100%)
rename patches/server/{0094-Configurable-RCON-IP-address.patch => 0089-Configurable-RCON-IP-address.patch} (100%)
rename patches/server/{0095-EntityRegainHealthEvent-isFastRegen-API.patch => 0090-EntityRegainHealthEvent-isFastRegen-API.patch} (100%)
rename patches/server/{0096-Add-ability-to-configure-frosted_ice-properties.patch => 0091-Add-ability-to-configure-frosted_ice-properties.patch} (100%)
rename patches/server/{0097-remove-null-possibility-for-getServer-singleton.patch => 0092-remove-null-possibility-for-getServer-singleton.patch} (92%)
rename patches/server/{0098-Improve-Maps-in-item-frames-performance-and-bug-fixe.patch => 0093-Improve-Maps-in-item-frames-performance-and-bug-fixe.patch} (97%)
rename patches/server/{0099-LootTable-API-Replenishable-Lootables-Feature.patch => 0094-LootTable-API-Replenishable-Lootables-Feature.patch} (99%)
rename patches/server/{0100-Don-t-save-empty-scoreboard-teams-to-scoreboard.dat.patch => 0095-Don-t-save-empty-scoreboard-teams-to-scoreboard.dat.patch} (100%)
rename patches/server/{0101-System-property-for-disabling-watchdoge.patch => 0096-System-property-for-disabling-watchdoge.patch} (83%)
rename patches/server/{0102-Async-GameProfileCache-saving.patch => 0097-Async-GameProfileCache-saving.patch} (90%)
rename patches/server/{0103-Optional-TNT-doesn-t-move-in-water.patch => 0098-Optional-TNT-doesn-t-move-in-water.patch} (100%)
rename patches/server/{0104-Faster-redstone-torch-rapid-clock-removal.patch => 0099-Faster-redstone-torch-rapid-clock-removal.patch} (97%)
rename patches/server/{0105-Add-server-name-parameter.patch => 0100-Add-server-name-parameter.patch} (100%)
rename patches/server/{0106-Only-send-Dragon-Wither-Death-sounds-to-same-world.patch => 0101-Only-send-Dragon-Wither-Death-sounds-to-same-world.patch} (64%)
rename patches/server/{0107-Fix-Old-Sign-Conversion.patch => 0102-Fix-Old-Sign-Conversion.patch} (100%)
rename patches/server/{0108-Avoid-blocking-on-Network-Manager-creation.patch => 0103-Avoid-blocking-on-Network-Manager-creation.patch} (100%)
rename patches/server/{0109-Don-t-lookup-game-profiles-that-have-no-UUID-and-no-.patch => 0104-Don-t-lookup-game-profiles-that-have-no-UUID-and-no-.patch} (100%)
rename patches/server/{0110-Add-setting-for-proxy-online-mode-status.patch => 0105-Add-setting-for-proxy-online-mode-status.patch} (97%)
rename patches/server/{0111-Optimise-BlockState-s-hashCode-equals.patch => 0106-Optimise-BlockState-s-hashCode-equals.patch} (100%)
rename patches/server/{0112-Configurable-packet-in-spam-threshold.patch => 0107-Configurable-packet-in-spam-threshold.patch} (100%)
rename patches/server/{0113-Configurable-flying-kick-messages.patch => 0108-Configurable-flying-kick-messages.patch} (100%)
rename patches/server/{0114-Add-EntityZapEvent.patch => 0109-Add-EntityZapEvent.patch} (100%)
rename patches/server/{0115-Filter-bad-data-from-ArmorStand-and-SpawnEgg-items.patch => 0110-Filter-bad-data-from-ArmorStand-and-SpawnEgg-items.patch} (100%)
rename patches/server/{0116-Cache-user-authenticator-threads.patch => 0111-Cache-user-authenticator-threads.patch} (100%)
rename patches/server/{0117-Allow-Reloading-of-Command-Aliases.patch => 0112-Allow-Reloading-of-Command-Aliases.patch} (94%)
rename patches/server/{0118-Add-source-to-PlayerExpChangeEvent.patch => 0113-Add-source-to-PlayerExpChangeEvent.patch} (100%)
rename patches/server/{0119-Add-ProjectileCollideEvent.patch => 0114-Add-ProjectileCollideEvent.patch} (100%)
rename patches/server/{0120-Prevent-Pathfinding-out-of-World-Border.patch => 0115-Prevent-Pathfinding-out-of-World-Border.patch} (100%)
rename patches/server/{0121-Optimize-World.isLoaded-BlockPosition-Z.patch => 0116-Optimize-World.isLoaded-BlockPosition-Z.patch} (91%)
rename patches/server/{0122-Bound-Treasure-Maps-to-World-Border.patch => 0117-Bound-Treasure-Maps-to-World-Border.patch} (96%)
rename patches/server/{0123-Configurable-Cartographer-Treasure-Maps.patch => 0118-Configurable-Cartographer-Treasure-Maps.patch} (100%)
rename patches/server/{0124-Optimize-ItemStack.isEmpty.patch => 0119-Optimize-ItemStack.isEmpty.patch} (100%)
rename patches/server/{0125-Add-API-methods-to-control-if-armour-stands-can-move.patch => 0120-Add-API-methods-to-control-if-armour-stands-can-move.patch} (100%)
rename patches/server/{0126-String-based-Action-Bar-API.patch => 0121-String-based-Action-Bar-API.patch} (94%)
rename patches/server/{0127-Properly-fix-item-duplication-bug.patch => 0122-Properly-fix-item-duplication-bug.patch} (90%)
rename patches/server/{0128-Firework-API-s.patch => 0123-Firework-API-s.patch} (100%)
rename patches/server/{0129-PlayerTeleportEndGatewayEvent.patch => 0124-PlayerTeleportEndGatewayEvent.patch} (100%)
rename patches/server/{0130-Provide-E-TE-Chunk-count-stat-methods.patch => 0125-Provide-E-TE-Chunk-count-stat-methods.patch} (94%)
rename patches/server/{0131-Enforce-Sync-Player-Saves.patch => 0126-Enforce-Sync-Player-Saves.patch} (92%)
rename patches/server/{0132-Don-t-allow-entities-to-ride-themselves-572.patch => 0127-Don-t-allow-entities-to-ride-themselves-572.patch} (84%)
rename patches/server/{0133-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch => 0128-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch} (99%)
rename patches/server/{0134-Cap-Entity-Collisions.patch => 0129-Cap-Entity-Collisions.patch} (95%)
rename patches/server/{0135-Remove-CraftScheduler-Async-Task-Debugger.patch => 0130-Remove-CraftScheduler-Async-Task-Debugger.patch} (100%)
rename patches/server/{0136-Do-not-let-armorstands-drown.patch => 0131-Do-not-let-armorstands-drown.patch} (100%)
rename patches/server/{0138-Properly-handle-async-calls-to-restart-the-server.patch => 0132-Properly-handle-async-calls-to-restart-the-server.patch} (97%)
rename patches/server/{0139-Add-option-to-make-parrots-stay-on-shoulders-despite.patch => 0133-Add-option-to-make-parrots-stay-on-shoulders-despite.patch} (100%)
rename patches/server/{0140-Add-configuration-option-to-prevent-player-names-fro.patch => 0134-Add-configuration-option-to-prevent-player-names-fro.patch} (89%)
rename patches/server/{0141-Use-TerminalConsoleAppender-for-console-improvements.patch => 0135-Use-TerminalConsoleAppender-for-console-improvements.patch} (98%)
rename patches/server/{0142-provide-a-configurable-option-to-disable-creeper-lin.patch => 0136-provide-a-configurable-option-to-disable-creeper-lin.patch} (100%)
rename patches/server/{0143-Item-canEntityPickup.patch => 0137-Item-canEntityPickup.patch} (100%)
rename patches/server/{0144-PlayerPickupItemEvent-setFlyAtPlayer.patch => 0138-PlayerPickupItemEvent-setFlyAtPlayer.patch} (100%)
rename patches/server/{0145-PlayerAttemptPickupItemEvent.patch => 0139-PlayerAttemptPickupItemEvent.patch} (100%)
rename patches/server/{0146-Do-not-submit-profile-lookups-to-worldgen-threads.patch => 0140-Do-not-submit-profile-lookups-to-worldgen-threads.patch} (100%)
rename patches/server/{0147-Add-UnknownCommandEvent.patch => 0141-Add-UnknownCommandEvent.patch} (92%)
rename patches/server/{0148-Basic-PlayerProfile-API.patch => 0142-Basic-PlayerProfile-API.patch} (99%)
rename patches/server/{0149-Shoulder-Entities-Release-API.patch => 0143-Shoulder-Entities-Release-API.patch} (100%)
rename patches/server/{0150-Profile-Lookup-Events.patch => 0144-Profile-Lookup-Events.patch} (100%)
rename patches/server/{0151-Block-player-logins-during-server-shutdown.patch => 0145-Block-player-logins-during-server-shutdown.patch} (100%)
rename patches/server/{0152-Entity-fromMobSpawner.patch => 0146-Entity-fromMobSpawner.patch} (93%)
rename patches/server/{0153-Improve-the-Saddle-API-for-Horses.patch => 0147-Improve-the-Saddle-API-for-Horses.patch} (100%)
rename patches/server/{0154-Implement-ensureServerConversions-API.patch => 0148-Implement-ensureServerConversions-API.patch} (100%)
rename patches/server/{0155-Implement-getI18NDisplayName.patch => 0149-Implement-getI18NDisplayName.patch} (100%)
rename patches/server/{0156-ProfileWhitelistVerifyEvent.patch => 0150-ProfileWhitelistVerifyEvent.patch} (97%)
rename patches/server/{0157-Fix-this-stupid-bullshit.patch => 0151-Fix-this-stupid-bullshit.patch} (100%)
rename patches/server/{0158-LivingEntity-setKiller.patch => 0152-LivingEntity-setKiller.patch} (100%)
rename patches/server/{0159-Ocelot-despawns-should-honor-nametags-and-leash.patch => 0153-Ocelot-despawns-should-honor-nametags-and-leash.patch} (100%)
rename patches/server/{0160-Reset-spawner-timer-when-spawner-event-is-cancelled.patch => 0154-Reset-spawner-timer-when-spawner-event-is-cancelled.patch} (100%)
rename patches/server/{0161-Allow-specifying-a-custom-authentication-servers-dow.patch => 0155-Allow-specifying-a-custom-authentication-servers-dow.patch} (100%)
rename patches/server/{0162-Handle-plugin-prefixes-using-Log4J-configuration.patch => 0156-Handle-plugin-prefixes-using-Log4J-configuration.patch} (100%)
rename patches/server/{0163-Improve-Log4J-Configuration-Plugin-Loggers.patch => 0157-Improve-Log4J-Configuration-Plugin-Loggers.patch} (100%)
rename patches/server/{0164-Add-PlayerJumpEvent.patch => 0158-Add-PlayerJumpEvent.patch} (100%)
rename patches/server/{0165-handle-ServerboundKeepAlivePacket-async.patch => 0159-handle-ServerboundKeepAlivePacket-async.patch} (100%)
rename patches/server/{0166-Expose-client-protocol-version-and-virtual-host.patch => 0160-Expose-client-protocol-version-and-virtual-host.patch} (90%)
rename patches/server/{0167-revert-serverside-behavior-of-keepalives.patch => 0161-revert-serverside-behavior-of-keepalives.patch} (100%)
rename patches/server/{0168-Send-attack-SoundEffects-only-to-players-who-can-see.patch => 0162-Send-attack-SoundEffects-only-to-players-who-can-see.patch} (100%)
rename patches/server/{0169-Add-PlayerArmorChangeEvent.patch => 0163-Add-PlayerArmorChangeEvent.patch} (100%)
rename patches/server/{0170-Prevent-logins-from-being-processed-when-the-player-.patch => 0164-Prevent-logins-from-being-processed-when-the-player-.patch} (100%)
rename patches/server/{0171-Fix-MC-117075-TE-Unload-Lag-Spike.patch => 0165-Fix-MC-117075-TE-Unload-Lag-Spike.patch} (88%)
rename patches/server/{0172-use-CB-BlockState-implementations-for-captured-block.patch => 0166-use-CB-BlockState-implementations-for-captured-block.patch} (95%)
rename patches/server/{0173-API-to-get-a-BlockState-without-a-snapshot.patch => 0167-API-to-get-a-BlockState-without-a-snapshot.patch} (100%)
rename patches/server/{0174-AsyncTabCompleteEvent.patch => 0168-AsyncTabCompleteEvent.patch} (99%)
rename patches/server/{0175-PlayerPickupExperienceEvent.patch => 0169-PlayerPickupExperienceEvent.patch} (100%)
rename patches/server/{0176-Ability-to-apply-mending-to-XP-API.patch => 0170-Ability-to-apply-mending-to-XP-API.patch} (94%)
rename patches/server/{0177-PlayerNaturallySpawnCreaturesEvent.patch => 0171-PlayerNaturallySpawnCreaturesEvent.patch} (86%)
rename patches/server/{0178-Add-setPlayerProfile-API-for-Skulls.patch => 0172-Add-setPlayerProfile-API-for-Skulls.patch} (100%)
rename patches/server/{0179-PreCreatureSpawnEvent.patch => 0173-PreCreatureSpawnEvent.patch} (100%)
rename patches/server/{0180-Fill-Profile-Property-Events.patch => 0174-Fill-Profile-Property-Events.patch} (100%)
rename patches/server/{0181-PlayerAdvancementCriterionGrantEvent.patch => 0175-PlayerAdvancementCriterionGrantEvent.patch} (100%)
rename patches/server/{0182-Add-ArmorStand-Item-Meta.patch => 0176-Add-ArmorStand-Item-Meta.patch} (100%)
rename patches/server/{0183-Extend-Player-Interact-cancellation.patch => 0177-Extend-Player-Interact-cancellation.patch} (100%)
rename patches/server/{0184-Tameable-getOwnerUniqueId-API.patch => 0178-Tameable-getOwnerUniqueId-API.patch} (100%)
rename patches/server/{0185-Toggleable-player-crits-helps-mitigate-hacked-client.patch => 0179-Toggleable-player-crits-helps-mitigate-hacked-client.patch} (100%)
rename patches/server/{0186-Disable-Explicit-Network-Manager-Flushing.patch => 0180-Disable-Explicit-Network-Manager-Flushing.patch} (85%)
rename patches/server/{0187-Implement-extended-PaperServerListPingEvent.patch => 0181-Implement-extended-PaperServerListPingEvent.patch} (98%)
rename patches/server/{0188-Ability-to-change-PlayerProfile-in-AsyncPreLoginEven.patch => 0182-Ability-to-change-PlayerProfile-in-AsyncPreLoginEven.patch} (100%)
rename patches/server/{0189-Player.setPlayerProfile-API.patch => 0183-Player.setPlayerProfile-API.patch} (94%)
rename patches/server/{0190-getPlayerUniqueId-API.patch => 0184-getPlayerUniqueId-API.patch} (94%)
rename patches/server/{0191-Improved-Async-Task-Scheduler.patch => 0185-Improved-Async-Task-Scheduler.patch} (100%)
rename patches/server/{0192-Make-legacy-ping-handler-more-reliable.patch => 0186-Make-legacy-ping-handler-more-reliable.patch} (100%)
rename patches/server/{0193-Call-PaperServerListPingEvent-for-legacy-pings.patch => 0187-Call-PaperServerListPingEvent-for-legacy-pings.patch} (100%)
rename patches/server/{0194-Flag-to-disable-the-channel-limit.patch => 0188-Flag-to-disable-the-channel-limit.patch} (91%)
rename patches/server/{0195-Add-openSign-method-to-HumanEntity.patch => 0189-Add-openSign-method-to-HumanEntity.patch} (100%)
rename patches/server/{0196-Configurable-sprint-interruption-on-attack.patch => 0190-Configurable-sprint-interruption-on-attack.patch} (100%)
rename patches/server/{0197-Fix-exploit-that-allowed-colored-signs-to-be-created.patch => 0191-Fix-exploit-that-allowed-colored-signs-to-be-created.patch} (100%)
rename patches/server/{0198-EndermanEscapeEvent.patch => 0192-EndermanEscapeEvent.patch} (100%)
rename patches/server/{0199-Enderman.teleportRandomly.patch => 0193-Enderman.teleportRandomly.patch} (100%)
rename patches/server/{0200-Block-Enderpearl-Travel-Exploit.patch => 0194-Block-Enderpearl-Travel-Exploit.patch} (93%)
rename patches/server/{0201-Expand-World.spawnParticle-API-and-add-Builder.patch => 0195-Expand-World.spawnParticle-API-and-add-Builder.patch} (92%)
rename patches/server/{0202-Prevent-Frosted-Ice-from-loading-holding-chunks.patch => 0196-Prevent-Frosted-Ice-from-loading-holding-chunks.patch} (100%)
rename patches/server/{0203-EndermanAttackPlayerEvent.patch => 0197-EndermanAttackPlayerEvent.patch} (100%)
rename patches/server/{0204-WitchConsumePotionEvent.patch => 0198-WitchConsumePotionEvent.patch} (100%)
rename patches/server/{0205-WitchThrowPotionEvent.patch => 0199-WitchThrowPotionEvent.patch} (100%)
rename patches/server/{0206-Allow-spawning-Item-entities-with-World.spawnEntity.patch => 0200-Allow-spawning-Item-entities-with-World.spawnEntity.patch} (100%)
rename patches/server/{0207-WitchReadyPotionEvent.patch => 0201-WitchReadyPotionEvent.patch} (100%)
rename patches/server/{0208-ItemStack-getMaxItemUseDuration.patch => 0202-ItemStack-getMaxItemUseDuration.patch} (91%)
rename patches/server/{0209-Implement-EntityTeleportEndGatewayEvent.patch => 0203-Implement-EntityTeleportEndGatewayEvent.patch} (100%)
rename patches/server/{0210-Unset-Ignited-flag-on-cancel-of-Explosion-Event.patch => 0204-Unset-Ignited-flag-on-cancel-of-Explosion-Event.patch} (100%)
rename patches/server/{0211-Fix-CraftEntity-hashCode.patch => 0205-Fix-CraftEntity-hashCode.patch} (100%)
rename patches/server/{0212-Configurable-Alternative-LootPool-Luck-Formula.patch => 0206-Configurable-Alternative-LootPool-Luck-Formula.patch} (100%)
rename patches/server/{0213-Print-Error-details-when-failing-to-save-player-data.patch => 0207-Print-Error-details-when-failing-to-save-player-data.patch} (90%)
rename patches/server/{0214-Make-shield-blocking-delay-configurable.patch => 0208-Make-shield-blocking-delay-configurable.patch} (100%)
rename patches/server/{0215-Improve-EntityShootBowEvent.patch => 0209-Improve-EntityShootBowEvent.patch} (100%)
rename patches/server/{0216-PlayerReadyArrowEvent.patch => 0210-PlayerReadyArrowEvent.patch} (100%)
rename patches/server/{0217-Implement-EntityKnockbackByEntityEvent.patch => 0211-Implement-EntityKnockbackByEntityEvent.patch} (100%)
rename patches/server/{0218-Expand-Explosions-API.patch => 0212-Expand-Explosions-API.patch} (89%)
rename patches/server/{0219-LivingEntity-Hand-Raised-Item-Use-API.patch => 0213-LivingEntity-Hand-Raised-Item-Use-API.patch} (100%)
rename patches/server/{0220-RangedEntity-API.patch => 0214-RangedEntity-API.patch} (100%)
rename patches/server/{0221-Add-config-to-disable-ender-dragon-legacy-check.patch => 0215-Add-config-to-disable-ender-dragon-legacy-check.patch} (100%)
rename patches/server/{0222-Implement-World.getEntity-UUID-API.patch => 0216-Implement-World.getEntity-UUID-API.patch} (85%)
rename patches/server/{0223-InventoryCloseEvent-Reason-API.patch => 0217-InventoryCloseEvent-Reason-API.patch} (93%)
rename patches/server/{0224-Vex-get-setSummoner-API.patch => 0218-Vex-get-setSummoner-API.patch} (100%)
rename patches/server/{0225-Refresh-player-inventory-when-cancelling-PlayerInter.patch => 0219-Refresh-player-inventory-when-cancelling-PlayerInter.patch} (100%)
rename patches/server/{0226-Use-AsyncAppender-to-keep-logging-IO-off-main-thread.patch => 0220-Use-AsyncAppender-to-keep-logging-IO-off-main-thread.patch} (100%)
rename patches/server/{0227-add-more-information-to-Entity.toString.patch => 0221-add-more-information-to-Entity.toString.patch} (91%)
rename patches/server/{0228-Add-CraftMagicNumbers.isSupportedApiVersion.patch => 0222-Add-CraftMagicNumbers.isSupportedApiVersion.patch} (100%)
rename patches/server/{0229-EnderDragon-Events.patch => 0223-EnderDragon-Events.patch} (100%)
rename patches/server/{0230-PlayerElytraBoostEvent.patch => 0224-PlayerElytraBoostEvent.patch} (100%)
rename patches/server/{0231-PlayerLaunchProjectileEvent.patch => 0225-PlayerLaunchProjectileEvent.patch} (100%)
rename patches/server/{0232-Improve-BlockPosition-inlining.patch => 0226-Improve-BlockPosition-inlining.patch} (100%)
rename patches/server/{0233-Option-to-prevent-armor-stands-from-doing-entity-loo.patch => 0227-Option-to-prevent-armor-stands-from-doing-entity-loo.patch} (91%)
rename patches/server/{0234-Vanished-players-don-t-have-rights.patch => 0228-Vanished-players-don-t-have-rights.patch} (98%)
rename patches/server/{0235-Allow-disabling-armour-stand-ticking.patch => 0229-Allow-disabling-armour-stand-ticking.patch} (100%)
rename patches/server/{0236-SkeletonHorse-Additions.patch => 0230-SkeletonHorse-Additions.patch} (96%)
rename patches/server/{0237-Don-t-call-getItemMeta-on-hasItemMeta.patch => 0231-Don-t-call-getItemMeta-on-hasItemMeta.patch} (97%)
rename patches/server/{0238-Implement-Expanded-ArmorStand-API.patch => 0232-Implement-Expanded-ArmorStand-API.patch} (100%)
rename patches/server/{0239-AnvilDamageEvent.patch => 0233-AnvilDamageEvent.patch} (100%)
rename patches/server/{0240-Add-hand-to-bucket-events.patch => 0234-Add-hand-to-bucket-events.patch} (100%)
rename patches/server/{0241-Add-TNTPrimeEvent.patch => 0235-Add-TNTPrimeEvent.patch} (98%)
rename patches/server/{0242-Break-up-and-make-tab-spam-limits-configurable.patch => 0236-Break-up-and-make-tab-spam-limits-configurable.patch} (100%)
rename patches/server/{0243-MC-135506-Experience-should-save-as-Integers.patch => 0237-MC-135506-Experience-should-save-as-Integers.patch} (100%)
rename patches/server/{0244-Remove-unnecessary-itemmeta-handling.patch => 0238-Remove-unnecessary-itemmeta-handling.patch} (100%)
rename patches/server/{0245-Add-Debug-Entities-option-to-debug-dupe-uuid-issues.patch => 0239-Add-Debug-Entities-option-to-debug-dupe-uuid-issues.patch} (91%)
rename patches/server/{0246-Add-Early-Warning-Feature-to-WatchDog.patch => 0240-Add-Early-Warning-Feature-to-WatchDog.patch} (90%)
rename patches/server/{0247-Use-ConcurrentHashMap-in-JsonList.patch => 0241-Use-ConcurrentHashMap-in-JsonList.patch} (98%)
rename patches/server/{0248-Use-a-Queue-for-Queueing-Commands.patch => 0242-Use-a-Queue-for-Queueing-Commands.patch} (87%)
rename patches/server/{0249-Ability-to-get-Tile-Entities-from-a-chunk-without-sn.patch => 0243-Ability-to-get-Tile-Entities-from-a-chunk-without-sn.patch} (91%)
rename patches/server/{0250-Optimize-BlockPosition-helper-methods.patch => 0244-Optimize-BlockPosition-helper-methods.patch} (100%)
rename patches/server/{0251-Restore-vanilla-default-mob-spawn-range-and-water-an.patch => 0245-Restore-vanilla-default-mob-spawn-range-and-water-an.patch} (100%)
rename patches/server/{0252-Slime-Pathfinder-Events.patch => 0246-Slime-Pathfinder-Events.patch} (100%)
rename patches/server/{0253-Configurable-speed-for-water-flowing-over-lava.patch => 0247-Configurable-speed-for-water-flowing-over-lava.patch} (100%)
rename patches/server/{0254-Optimize-CraftBlockData-Creation.patch => 0248-Optimize-CraftBlockData-Creation.patch} (90%)
rename patches/server/{0255-Optimize-MappedRegistry.patch => 0249-Optimize-MappedRegistry.patch} (100%)
rename patches/server/{0256-Add-PhantomPreSpawnEvent.patch => 0250-Add-PhantomPreSpawnEvent.patch} (100%)
rename patches/server/{0257-Add-More-Creeper-API.patch => 0251-Add-More-Creeper-API.patch} (100%)
rename patches/server/{0258-Inventory-removeItemAnySlot.patch => 0252-Inventory-removeItemAnySlot.patch} (100%)
rename patches/server/{0259-Make-CraftWorld-loadChunk-int-int-false-load-unconve.patch => 0253-Make-CraftWorld-loadChunk-int-int-false-load-unconve.patch} (87%)
rename patches/server/{0260-Add-ray-tracing-methods-to-LivingEntity.patch => 0254-Add-ray-tracing-methods-to-LivingEntity.patch} (97%)
rename patches/server/{0261-Expose-attack-cooldown-methods-for-Player.patch => 0255-Expose-attack-cooldown-methods-for-Player.patch} (86%)
rename patches/server/{0262-Improve-death-events.patch => 0256-Improve-death-events.patch} (97%)
rename patches/server/{0263-Allow-chests-to-be-placed-with-NBT-data.patch => 0257-Allow-chests-to-be-placed-with-NBT-data.patch} (100%)
rename patches/server/{0264-Mob-Pathfinding-API.patch => 0258-Mob-Pathfinding-API.patch} (100%)
rename patches/server/{0265-Implement-an-API-for-CanPlaceOn-and-CanDestroy-NBT-v.patch => 0259-Implement-an-API-for-CanPlaceOn-and-CanDestroy-NBT-v.patch} (100%)
rename patches/server/{0266-Prevent-chunk-loading-from-Fluid-Flowing.patch => 0260-Prevent-chunk-loading-from-Fluid-Flowing.patch} (100%)
rename patches/server/{0267-Prevent-Mob-AI-Rules-from-Loading-Chunks.patch => 0261-Prevent-Mob-AI-Rules-from-Loading-Chunks.patch} (100%)
rename patches/server/{0268-Prevent-mob-spawning-from-loading-generating-chunks.patch => 0262-Prevent-mob-spawning-from-loading-generating-chunks.patch} (100%)
rename patches/server/{0269-Implement-furnace-cook-speed-multiplier-API.patch => 0263-Implement-furnace-cook-speed-multiplier-API.patch} (100%)
rename patches/server/{0270-Catch-JsonParseException-in-Entity-and-TE-names.patch => 0264-Catch-JsonParseException-in-Entity-and-TE-names.patch} (96%)
rename patches/server/{0271-Honor-EntityAgeable.ageLock.patch => 0265-Honor-EntityAgeable.ageLock.patch} (100%)
rename patches/server/{0272-Configurable-connection-throttle-kick-message.patch => 0266-Configurable-connection-throttle-kick-message.patch} (100%)
rename patches/server/{0273-Hook-into-CB-plugin-rewrites.patch => 0267-Hook-into-CB-plugin-rewrites.patch} (100%)
rename patches/server/{0274-PreSpawnerSpawnEvent.patch => 0268-PreSpawnerSpawnEvent.patch} (100%)
rename patches/server/{0275-Add-LivingEntity-getTargetEntity.patch => 0269-Add-LivingEntity-getTargetEntity.patch} (100%)
rename patches/server/{0276-Add-sun-related-API.patch => 0270-Add-sun-related-API.patch} (89%)
rename patches/server/{0277-Turtle-API.patch => 0271-Turtle-API.patch} (100%)
rename patches/server/{0278-Call-player-spectator-target-events-and-improve-impl.patch => 0272-Call-player-spectator-target-events-and-improve-impl.patch} (96%)
rename patches/server/{0279-MC-50319-Check-other-worlds-for-shooter-of-projectil.patch => 0273-MC-50319-Check-other-worlds-for-shooter-of-projectil.patch} (100%)
rename patches/server/{0280-Add-more-Witch-API.patch => 0274-Add-more-Witch-API.patch} (100%)
rename patches/server/{0281-Check-Drowned-for-Villager-Aggression-Config.patch => 0275-Check-Drowned-for-Villager-Aggression-Config.patch} (100%)
rename patches/server/{0282-Add-option-to-prevent-players-from-moving-into-unloa.patch => 0276-Add-option-to-prevent-players-from-moving-into-unloa.patch} (100%)
rename patches/server/{0283-Reset-players-airTicks-on-respawn.patch => 0277-Reset-players-airTicks-on-respawn.patch} (83%)
rename patches/server/{0284-Don-t-sleep-after-profile-lookups-if-not-needed.patch => 0278-Don-t-sleep-after-profile-lookups-if-not-needed.patch} (100%)
rename patches/server/{0285-Improve-Server-Thread-Pool-and-Thread-Priorities.patch => 0279-Improve-Server-Thread-Pool-and-Thread-Priorities.patch} (98%)
rename patches/server/{0286-Optimize-World-Time-Updates.patch => 0280-Optimize-World-Time-Updates.patch} (94%)
rename patches/server/{0287-Restore-custom-InventoryHolder-support.patch => 0281-Restore-custom-InventoryHolder-support.patch} (100%)
rename patches/server/{0288-Use-Vanilla-Minecart-Speeds.patch => 0282-Use-Vanilla-Minecart-Speeds.patch} (100%)
rename patches/server/{0289-Fix-SpongeAbsortEvent-handling.patch => 0283-Fix-SpongeAbsortEvent-handling.patch} (100%)
rename patches/server/{0290-Don-t-allow-digging-into-unloaded-chunks.patch => 0284-Don-t-allow-digging-into-unloaded-chunks.patch} (100%)
rename patches/server/{0291-Make-the-default-permission-message-configurable.patch => 0285-Make-the-default-permission-message-configurable.patch} (88%)
rename patches/server/{0292-Prevent-rayTrace-from-loading-chunks.patch => 0286-Prevent-rayTrace-from-loading-chunks.patch} (100%)
rename patches/server/{0293-Handle-Large-Packets-disconnecting-client.patch => 0287-Handle-Large-Packets-disconnecting-client.patch} (97%)
rename patches/server/{0294-force-entity-dismount-during-teleportation.patch => 0288-force-entity-dismount-during-teleportation.patch} (92%)
rename patches/server/{0295-Add-more-Zombie-API.patch => 0289-Add-more-Zombie-API.patch} (100%)
rename patches/server/{0296-Book-Size-Limits.patch => 0290-Book-Size-Limits.patch} (100%)
rename patches/server/{0297-Add-PlayerConnectionCloseEvent.patch => 0291-Add-PlayerConnectionCloseEvent.patch} (96%)
rename patches/server/{0298-Prevent-Enderman-from-loading-chunks.patch => 0292-Prevent-Enderman-from-loading-chunks.patch} (100%)
rename patches/server/{0299-Add-APIs-to-replace-OfflinePlayer-getLastPlayed.patch => 0293-Add-APIs-to-replace-OfflinePlayer-getLastPlayed.patch} (91%)
rename patches/server/{0300-Workaround-for-vehicle-tracking-issue-on-disconnect.patch => 0294-Workaround-for-vehicle-tracking-issue-on-disconnect.patch} (85%)
rename patches/server/{0301-Block-Entity-remove-from-being-called-on-Players.patch => 0295-Block-Entity-remove-from-being-called-on-Players.patch} (90%)
rename patches/server/{0302-BlockDestroyEvent.patch => 0296-BlockDestroyEvent.patch} (93%)
rename patches/server/{0303-Async-command-map-building.patch => 0297-Async-command-map-building.patch} (95%)
rename patches/server/{0304-Implement-Brigadier-Mojang-API.patch => 0298-Implement-Brigadier-Mojang-API.patch} (100%)
rename patches/server/{0305-Fix-Custom-Shapeless-Custom-Crafting-Recipes.patch => 0299-Fix-Custom-Shapeless-Custom-Crafting-Recipes.patch} (100%)
rename patches/server/{0306-Limit-Client-Sign-length-more.patch => 0300-Limit-Client-Sign-length-more.patch} (100%)
rename patches/server/{0307-Don-t-check-ConvertSigns-boolean-every-sign-save.patch => 0301-Don-t-check-ConvertSigns-boolean-every-sign-save.patch} (100%)
rename patches/server/{0308-Optimize-Network-Manager-and-add-advanced-packet-sup.patch => 0302-Optimize-Network-Manager-and-add-advanced-packet-sup.patch} (93%)
rename patches/server/{0309-Handle-Oversized-Tile-Entities-in-chunks.patch => 0303-Handle-Oversized-Tile-Entities-in-chunks.patch} (100%)
rename patches/server/{0310-Set-Zombie-last-tick-at-start-of-drowning-process.patch => 0304-Set-Zombie-last-tick-at-start-of-drowning-process.patch} (100%)
rename patches/server/{0311-Call-WhitelistToggleEvent-when-whitelist-is-toggled.patch => 0305-Call-WhitelistToggleEvent-when-whitelist-is-toggled.patch} (88%)
rename patches/server/{0312-Entity-getEntitySpawnReason.patch => 0306-Entity-getEntitySpawnReason.patch} (92%)
rename patches/server/{0313-Update-entity-Metadata-for-all-tracked-players.patch => 0307-Update-entity-Metadata-for-all-tracked-players.patch} (100%)
rename patches/server/{0314-Fire-event-on-GS4-query.patch => 0308-Fire-event-on-GS4-query.patch} (100%)
rename patches/server/{0315-Implement-PlayerPostRespawnEvent.patch => 0309-Implement-PlayerPostRespawnEvent.patch} (95%)
rename patches/server/{0316-don-t-go-below-0-for-pickupDelay-breaks-picking-up-i.patch => 0310-don-t-go-below-0-for-pickupDelay-breaks-picking-up-i.patch} (100%)
rename patches/server/{0317-Server-Tick-Events.patch => 0311-Server-Tick-Events.patch} (85%)
rename patches/server/{0318-PlayerDeathEvent-getItemsToKeep.patch => 0312-PlayerDeathEvent-getItemsToKeep.patch} (92%)
rename patches/server/{0319-Optimize-Captured-TileEntity-Lookup.patch => 0313-Optimize-Captured-TileEntity-Lookup.patch} (78%)
rename patches/server/{0320-Add-Heightmap-API.patch => 0314-Add-Heightmap-API.patch} (95%)
rename patches/server/{0321-Mob-Spawner-API-Enhancements.patch => 0315-Mob-Spawner-API-Enhancements.patch} (100%)
rename patches/server/{0322-Fix-CB-call-to-changed-postToMainThread-method.patch => 0316-Fix-CB-call-to-changed-postToMainThread-method.patch} (100%)
rename patches/server/{0323-Fix-sounds-when-item-frames-are-modified-MC-123450.patch => 0317-Fix-sounds-when-item-frames-are-modified-MC-123450.patch} (100%)
rename patches/server/{0325-Implement-CraftBlockSoundGroup.patch => 0318-Implement-CraftBlockSoundGroup.patch} (100%)
rename patches/server/{0326-Configurable-Keep-Spawn-Loaded-range-per-world.patch => 0319-Configurable-Keep-Spawn-Loaded-range-per-world.patch} (96%)
rename patches/server/{0327-Allow-Saving-of-Oversized-Chunks.patch => 0320-Allow-Saving-of-Oversized-Chunks.patch} (98%)
rename patches/server/{0328-Expose-the-internal-current-tick.patch => 0321-Expose-the-internal-current-tick.patch} (90%)
rename patches/server/{0329-Fix-World-isChunkGenerated-calls.patch => 0322-Fix-World-isChunkGenerated-calls.patch} (80%)
rename patches/server/{0330-Show-blockstate-location-if-we-failed-to-read-it.patch => 0323-Show-blockstate-location-if-we-failed-to-read-it.patch} (100%)
rename patches/server/{0331-Only-count-Natural-Spawned-mobs-towards-natural-spaw.patch => 0324-Only-count-Natural-Spawned-mobs-towards-natural-spaw.patch} (100%)
rename patches/server/{0332-Configurable-projectile-relative-velocity.patch => 0325-Configurable-projectile-relative-velocity.patch} (100%)
rename patches/server/{0333-offset-item-frame-ticking.patch => 0326-offset-item-frame-ticking.patch} (100%)
rename patches/server/{0334-Fix-MC-158900.patch => 0327-Fix-MC-158900.patch} (94%)
rename patches/server/{0335-Prevent-consuming-the-wrong-itemstack.patch => 0328-Prevent-consuming-the-wrong-itemstack.patch} (100%)
rename patches/server/{0336-Dont-send-unnecessary-sign-update.patch => 0329-Dont-send-unnecessary-sign-update.patch} (100%)
rename patches/server/{0337-Add-option-to-disable-pillager-patrols.patch => 0330-Add-option-to-disable-pillager-patrols.patch} (100%)
rename patches/server/{0338-Flat-bedrock-generator-settings.patch => 0331-Flat-bedrock-generator-settings.patch} (99%)
rename patches/server/{0339-Prevent-sync-chunk-loads-when-villagers-try-to-find-.patch => 0332-Prevent-sync-chunk-loads-when-villagers-try-to-find-.patch} (100%)
rename patches/server/{0340-MC-145656-Fix-Follow-Range-Initial-Target.patch => 0333-MC-145656-Fix-Follow-Range-Initial-Target.patch} (100%)
rename patches/server/{0341-Duplicate-UUID-Resolve-Option.patch => 0334-Duplicate-UUID-Resolve-Option.patch} (71%)
rename patches/server/{0342-Optimize-Hoppers.patch => 0335-Optimize-Hoppers.patch} (99%)
rename patches/server/{0343-PlayerDeathEvent-shouldDropExperience.patch => 0336-PlayerDeathEvent-shouldDropExperience.patch} (85%)
rename patches/server/{0344-Prevent-bees-loading-chunks-checking-hive-position.patch => 0337-Prevent-bees-loading-chunks-checking-hive-position.patch} (100%)
rename patches/server/{0345-Don-t-load-Chunks-from-Hoppers-and-other-things.patch => 0338-Don-t-load-Chunks-from-Hoppers-and-other-things.patch} (100%)
rename patches/server/{0346-Guard-against-serializing-mismatching-chunk-coordina.patch => 0339-Guard-against-serializing-mismatching-chunk-coordina.patch} (92%)
rename patches/server/{0347-Optimise-IEntityAccess-getPlayerByUUID.patch => 0340-Optimise-IEntityAccess-getPlayerByUUID.patch} (84%)
rename patches/server/{0348-Fix-items-not-falling-correctly.patch => 0341-Fix-items-not-falling-correctly.patch} (94%)
rename patches/server/{0349-Lag-compensate-eating.patch => 0342-Lag-compensate-eating.patch} (100%)
rename patches/server/{0350-Optimize-call-to-getFluid-for-explosions.patch => 0343-Optimize-call-to-getFluid-for-explosions.patch} (100%)
rename patches/server/{0351-Fix-last-firework-in-stack-not-having-effects-when-d.patch => 0344-Fix-last-firework-in-stack-not-having-effects-when-d.patch} (100%)
rename patches/server/{0352-Add-effect-to-block-break-naturally.patch => 0345-Add-effect-to-block-break-naturally.patch} (100%)
rename patches/server/{0353-Entity-Activation-Range-2.0.patch => 0346-Entity-Activation-Range-2.0.patch} (97%)
rename patches/server/{0354-Increase-Light-Queue-Size.patch => 0347-Increase-Light-Queue-Size.patch} (93%)
rename patches/server/{0356-Anti-Xray.patch => 0348-Anti-Xray.patch} (95%)
rename patches/server/{0357-Implement-alternative-item-despawn-rate.patch => 0349-Implement-alternative-item-despawn-rate.patch} (100%)
rename patches/server/{0358-Tracking-Range-Improvements.patch => 0350-Tracking-Range-Improvements.patch} (95%)
rename patches/server/{0359-Fix-items-vanishing-through-end-portal.patch => 0351-Fix-items-vanishing-through-end-portal.patch} (89%)
rename patches/server/{0360-implement-optional-per-player-mob-spawns.patch => 0352-implement-optional-per-player-mob-spawns.patch} (93%)
rename patches/server/{0362-Bees-get-gravity-in-void.-Fixes-MC-167279.patch => 0353-Bees-get-gravity-in-void.-Fixes-MC-167279.patch} (100%)
rename patches/server/{0363-Optimise-getChunkAt-calls-for-loaded-chunks.patch => 0354-Optimise-getChunkAt-calls-for-loaded-chunks.patch} (89%)
rename patches/server/{0364-Add-debug-for-sync-chunk-loads.patch => 0355-Add-debug-for-sync-chunk-loads.patch} (92%)
rename patches/server/{0365-Remove-garbage-Java-version-check.patch => 0356-Remove-garbage-Java-version-check.patch} (100%)
rename patches/server/{0366-Add-ThrownEggHatchEvent.patch => 0357-Add-ThrownEggHatchEvent.patch} (100%)
rename patches/server/{0367-Entity-Jump-API.patch => 0358-Entity-Jump-API.patch} (100%)
rename patches/server/{0368-Add-option-to-nerf-pigmen-from-nether-portals.patch => 0359-Add-option-to-nerf-pigmen-from-nether-portals.patch} (91%)
rename patches/server/{0369-Make-the-GUI-graph-fancier.patch => 0360-Make-the-GUI-graph-fancier.patch} (100%)
delete mode 100644 patches/server/0361-Avoid-hopper-searches-if-there-are-no-items.patch
rename patches/server/{0370-add-hand-to-BlockMultiPlaceEvent.patch => 0361-add-hand-to-BlockMultiPlaceEvent.patch} (100%)
rename patches/server/{0371-Validate-tripwire-hook-placement-before-update.patch => 0362-Validate-tripwire-hook-placement-before-update.patch} (100%)
rename patches/server/{0372-Add-option-to-allow-iron-golems-to-spawn-in-air.patch => 0363-Add-option-to-allow-iron-golems-to-spawn-in-air.patch} (100%)
rename patches/server/{0373-Configurable-chance-of-villager-zombie-infection.patch => 0364-Configurable-chance-of-villager-zombie-infection.patch} (100%)
rename patches/server/{0374-Optimise-Chunk-getFluid.patch => 0365-Optimise-Chunk-getFluid.patch} (92%)
rename patches/server/{0375-Set-spigots-verbose-world-setting-to-false-by-def.patch => 0366-Set-spigots-verbose-world-setting-to-false-by-def.patch} (100%)
rename patches/server/{0376-Add-tick-times-API-and-mspt-command.patch => 0367-Add-tick-times-API-and-mspt-command.patch} (96%)
rename patches/server/{0377-Expose-MinecraftServer-isRunning.patch => 0368-Expose-MinecraftServer-isRunning.patch} (90%)
rename patches/server/{0378-Add-Raw-Byte-ItemStack-Serialization.patch => 0369-Add-Raw-Byte-ItemStack-Serialization.patch} (100%)
rename patches/server/{0379-Pillager-patrol-spawn-settings-and-per-player-option.patch => 0370-Pillager-patrol-spawn-settings-and-per-player-option.patch} (96%)
rename patches/server/{0380-Remote-Connections-shouldn-t-hold-up-shutdown.patch => 0371-Remote-Connections-shouldn-t-hold-up-shutdown.patch} (92%)
rename patches/server/{0381-Do-not-allow-bees-to-load-chunks-for-beehives.patch => 0372-Do-not-allow-bees-to-load-chunks-for-beehives.patch} (100%)
rename patches/server/{0382-Prevent-Double-PlayerChunkMap-adds-crashing-server.patch => 0373-Prevent-Double-PlayerChunkMap-adds-crashing-server.patch} (87%)
rename patches/server/{0383-Don-t-tick-dead-players.patch => 0374-Don-t-tick-dead-players.patch} (85%)
rename patches/server/{0384-Dead-Player-s-shouldn-t-be-able-to-move.patch => 0375-Dead-Player-s-shouldn-t-be-able-to-move.patch} (100%)
rename patches/server/{0385-Optimize-Collision-to-not-load-chunks.patch => 0376-Optimize-Collision-to-not-load-chunks.patch} (97%)
rename patches/server/{0386-Don-t-move-existing-players-to-world-spawn.patch => 0377-Don-t-move-existing-players-to-world-spawn.patch} (89%)
rename patches/server/{0387-Optimize-GoalSelector-Goal.Flag-Set-operations.patch => 0378-Optimize-GoalSelector-Goal.Flag-Set-operations.patch} (99%)
rename patches/server/{0388-Improved-Watchdog-Support.patch => 0379-Improved-Watchdog-Support.patch} (87%)
rename patches/server/{0389-Optimize-Pathfinding.patch => 0380-Optimize-Pathfinding.patch} (100%)
rename patches/server/{0390-Reduce-Either-Optional-allocation.patch => 0381-Reduce-Either-Optional-allocation.patch} (97%)
rename patches/server/{0391-Reduce-memory-footprint-of-NBTTagCompound.patch => 0382-Reduce-memory-footprint-of-NBTTagCompound.patch} (97%)
rename patches/server/{0392-Prevent-opening-inventories-when-frozen.patch => 0383-Prevent-opening-inventories-when-frozen.patch} (94%)
rename patches/server/{0394-Don-t-run-entity-collision-code-if-not-needed.patch => 0384-Don-t-run-entity-collision-code-if-not-needed.patch} (100%)
rename patches/server/{0395-Implement-Player-Client-Options-API.patch => 0385-Implement-Player-Client-Options-API.patch} (91%)
rename patches/server/{0396-Don-t-crash-if-player-is-attempted-to-be-removed-fro.patch => 0386-Don-t-crash-if-player-is-attempted-to-be-removed-fro.patch} (81%)
rename patches/server/{0398-Fix-Longstanding-Broken-behavior-of-PlayerJoinEvent.patch => 0387-Fix-Longstanding-Broken-behavior-of-PlayerJoinEvent.patch} (93%)
rename patches/server/{0399-Load-Chunks-for-Login-Asynchronously.patch => 0388-Load-Chunks-for-Login-Asynchronously.patch} (95%)
rename patches/server/{0400-Move-player-to-spawn-point-if-spawn-in-unloaded-worl.patch => 0389-Move-player-to-spawn-point-if-spawn-in-unloaded-worl.patch} (89%)
rename patches/server/{0401-Add-PlayerAttackEntityCooldownResetEvent.patch => 0390-Add-PlayerAttackEntityCooldownResetEvent.patch} (100%)
rename patches/server/{0402-Don-t-fire-BlockFade-on-worldgen-threads.patch => 0391-Don-t-fire-BlockFade-on-worldgen-threads.patch} (100%)
rename patches/server/{0403-Add-phantom-creative-and-insomniac-controls.patch => 0392-Add-phantom-creative-and-insomniac-controls.patch} (100%)
rename patches/server/{0404-Fix-numerous-item-duplication-issues-and-teleport-is.patch => 0393-Fix-numerous-item-duplication-issues-and-teleport-is.patch} (96%)
rename patches/server/{0405-Villager-Restocks-API.patch => 0394-Villager-Restocks-API.patch} (100%)
rename patches/server/{0406-Validate-PickItem-Packet-and-kick-for-invalid.patch => 0395-Validate-PickItem-Packet-and-kick-for-invalid.patch} (100%)
rename patches/server/{0407-Expose-game-version.patch => 0396-Expose-game-version.patch} (89%)
rename patches/server/{0408-Optimize-Voxel-Shape-Merging.patch => 0397-Optimize-Voxel-Shape-Merging.patch} (100%)
rename patches/server/{0409-Set-cap-on-JDK-per-thread-native-byte-buffer-cache.patch => 0398-Set-cap-on-JDK-per-thread-native-byte-buffer-cache.patch} (100%)
rename patches/server/{0410-misc-debugging-dumps.patch => 0399-misc-debugging-dumps.patch} (91%)
rename patches/server/{0411-Prevent-teleporting-dead-entities.patch => 0400-Prevent-teleporting-dead-entities.patch} (100%)
rename patches/server/{0412-Deobfuscate-stacktraces-in-log-messages-crash-report.patch => 0401-Deobfuscate-stacktraces-in-log-messages-crash-report.patch} (98%)
rename patches/server/{0413-Implement-Mob-Goal-API.patch => 0402-Implement-Mob-Goal-API.patch} (99%)
rename patches/server/{0414-Add-villager-reputation-API.patch => 0403-Add-villager-reputation-API.patch} (100%)
rename patches/server/{0415-Option-for-maximum-exp-value-when-merging-orbs.patch => 0404-Option-for-maximum-exp-value-when-merging-orbs.patch} (100%)
rename patches/server/{0416-ExperienceOrbMergeEvent.patch => 0405-ExperienceOrbMergeEvent.patch} (100%)
rename patches/server/{0417-Fix-PotionEffect-ignores-icon-flag.patch => 0406-Fix-PotionEffect-ignores-icon-flag.patch} (100%)
rename patches/server/{0418-Optimize-brigadier-child-sorting-performance.patch => 0407-Optimize-brigadier-child-sorting-performance.patch} (100%)
rename patches/server/{0419-Potential-bed-API.patch => 0408-Potential-bed-API.patch} (100%)
rename patches/server/{0420-Wait-for-Async-Tasks-during-shutdown.patch => 0409-Wait-for-Async-Tasks-during-shutdown.patch} (91%)
rename patches/server/{0421-Ensure-EntityRaider-respects-game-and-entity-rules-f.patch => 0410-Ensure-EntityRaider-respects-game-and-entity-rules-f.patch} (100%)
rename patches/server/{0422-Protect-Bedrock-and-End-Portal-Frames-from-being-des.patch => 0411-Protect-Bedrock-and-End-Portal-Frames-from-being-des.patch} (97%)
rename patches/server/{0423-Reduce-MutableInt-allocations-from-light-engine.patch => 0412-Reduce-MutableInt-allocations-from-light-engine.patch} (98%)
rename patches/server/{0424-Reduce-allocation-of-Vec3D-by-entity-tracker.patch => 0413-Reduce-allocation-of-Vec3D-by-entity-tracker.patch} (91%)
rename patches/server/{0425-Ensure-safe-gateway-teleport.patch => 0414-Ensure-safe-gateway-teleport.patch} (100%)
rename patches/server/{0426-Add-option-for-console-having-all-permissions.patch => 0415-Add-option-for-console-having-all-permissions.patch} (100%)
rename patches/server/{0427-Optimize-anyPlayerCloseEnoughForSpawning-to-use-dist.patch => 0416-Optimize-anyPlayerCloseEnoughForSpawning-to-use-dist.patch} (83%)
rename patches/server/{0428-Use-distance-map-to-optimise-entity-tracker.patch => 0417-Use-distance-map-to-optimise-entity-tracker.patch} (83%)
rename patches/server/{0430-Fix-villager-trading-demand-MC-163962.patch => 0418-Fix-villager-trading-demand-MC-163962.patch} (100%)
rename patches/server/{0431-Maps-shouldn-t-load-chunks.patch => 0419-Maps-shouldn-t-load-chunks.patch} (100%)
rename patches/server/{0432-Use-seed-based-lookup-for-Treasure-Maps-Fixes-lag-fr.patch => 0420-Use-seed-based-lookup-for-Treasure-Maps-Fixes-lag-fr.patch} (100%)
rename patches/server/{0433-Fix-CraftScheduler-runTaskTimerAsynchronously-Plugin.patch => 0421-Fix-CraftScheduler-runTaskTimerAsynchronously-Plugin.patch} (100%)
rename patches/server/{0434-Fix-piston-physics-inconsistency-MC-188840.patch => 0422-Fix-piston-physics-inconsistency-MC-188840.patch} (100%)
rename patches/server/{0435-Fix-sand-duping.patch => 0423-Fix-sand-duping.patch} (100%)
rename patches/server/{0436-Fix-missing-chunks-due-to-integer-overflow.patch => 0424-Fix-missing-chunks-due-to-integer-overflow.patch} (100%)
rename patches/server/{0437-Prevent-position-desync-in-playerconnection-causing-.patch => 0425-Prevent-position-desync-in-playerconnection-causing-.patch} (100%)
rename patches/server/{0438-Inventory-getHolder-method-without-block-snapshot.patch => 0426-Inventory-getHolder-method-without-block-snapshot.patch} (100%)
rename patches/server/{0439-Improve-Arrow-API.patch => 0427-Improve-Arrow-API.patch} (100%)
rename patches/server/{0440-Add-and-implement-PlayerRecipeBookClickEvent.patch => 0428-Add-and-implement-PlayerRecipeBookClickEvent.patch} (100%)
rename patches/server/{0441-Hide-sync-chunk-writes-behind-flag.patch => 0429-Hide-sync-chunk-writes-behind-flag.patch} (100%)
rename patches/server/{0442-Add-permission-for-command-blocks.patch => 0430-Add-permission-for-command-blocks.patch} (100%)
rename patches/server/{0443-Ensure-Entity-AABB-s-are-never-invalid.patch => 0431-Ensure-Entity-AABB-s-are-never-invalid.patch} (74%)
rename patches/server/{0444-Fix-Per-World-Difficulty-Remembering-Difficulty.patch => 0432-Fix-Per-World-Difficulty-Remembering-Difficulty.patch} (93%)
rename patches/server/{0445-Paper-dumpitem-command.patch => 0433-Paper-dumpitem-command.patch} (95%)
rename patches/server/{0446-Don-t-allow-null-UUID-s-for-chat.patch => 0434-Don-t-allow-null-UUID-s-for-chat.patch} (100%)
rename patches/server/{0447-Improve-Legacy-Component-serialization-size.patch => 0435-Improve-Legacy-Component-serialization-size.patch} (100%)
rename patches/server/{0448-Optimize-Bit-Operations-by-inlining.patch => 0436-Optimize-Bit-Operations-by-inlining.patch} (100%)
rename patches/server/{0449-Add-Plugin-Tickets-to-API-Chunk-Methods.patch => 0437-Add-Plugin-Tickets-to-API-Chunk-Methods.patch} (93%)
create mode 100644 patches/server/0438-incremental-chunk-and-player-saving.patch
rename patches/server/{0451-Stop-copy-on-write-operations-for-updating-light-dat.patch => 0439-Stop-copy-on-write-operations-for-updating-light-dat.patch} (99%)
rename patches/server/{0452-Support-old-UUID-format-for-NBT.patch => 0440-Support-old-UUID-format-for-NBT.patch} (100%)
rename patches/server/{0453-Clean-up-duplicated-GameProfile-Properties.patch => 0441-Clean-up-duplicated-GameProfile-Properties.patch} (100%)
rename patches/server/{0454-Convert-legacy-attributes-in-Item-Meta.patch => 0442-Convert-legacy-attributes-in-Item-Meta.patch} (100%)
rename patches/server/{0455-Remove-some-streams-from-structures.patch => 0443-Remove-some-streams-from-structures.patch} (100%)
rename patches/server/{0456-Remove-streams-from-classes-related-villager-gossip.patch => 0444-Remove-streams-from-classes-related-villager-gossip.patch} (100%)
rename patches/server/{0457-Support-components-in-ItemMeta.patch => 0445-Support-components-in-ItemMeta.patch} (100%)
rename patches/server/{0458-Improve-EntityTargetLivingEntityEvent-for-1.16-mobs.patch => 0446-Improve-EntityTargetLivingEntityEvent-for-1.16-mobs.patch} (100%)
rename patches/server/{0459-Add-entity-liquid-API.patch => 0447-Add-entity-liquid-API.patch} (100%)
rename patches/server/{0460-Update-itemstack-legacy-name-and-lore.patch => 0448-Update-itemstack-legacy-name-and-lore.patch} (100%)
rename patches/server/{0461-Spawn-player-in-correct-world-on-login.patch => 0449-Spawn-player-in-correct-world-on-login.patch} (94%)
rename patches/server/{0462-Add-PrepareResultEvent.patch => 0450-Add-PrepareResultEvent.patch} (100%)
delete mode 100644 patches/server/0450-incremental-chunk-and-player-saving.patch
rename patches/server/{0463-Don-t-check-chunk-for-portal-on-world-gen-entity-add.patch => 0451-Don-t-check-chunk-for-portal-on-world-gen-entity-add.patch} (100%)
rename patches/server/{0464-Optimize-NetworkManager-Exception-Handling.patch => 0452-Optimize-NetworkManager-Exception-Handling.patch} (100%)
rename patches/server/{0465-Optimize-the-advancement-data-player-iteration-to-be.patch => 0453-Optimize-the-advancement-data-player-iteration-to-be.patch} (100%)
rename patches/server/{0466-Fix-arrows-never-despawning-MC-125757.patch => 0454-Fix-arrows-never-despawning-MC-125757.patch} (100%)
rename patches/server/{0467-Thread-Safe-Vanilla-Command-permission-checking.patch => 0455-Thread-Safe-Vanilla-Command-permission-checking.patch} (100%)
rename patches/server/{0468-Fix-SPIGOT-5989.patch => 0456-Fix-SPIGOT-5989.patch} (97%)
rename patches/server/{0469-Fix-SPIGOT-5824-Bukkit-world-container-is-not-used.patch => 0457-Fix-SPIGOT-5824-Bukkit-world-container-is-not-used.patch} (100%)
rename patches/server/{0470-Fix-SPIGOT-5885-Unable-to-disable-advancements.patch => 0458-Fix-SPIGOT-5885-Unable-to-disable-advancements.patch} (100%)
rename patches/server/{0471-Fix-AdvancementDataPlayer-leak-due-from-quitting-ear.patch => 0459-Fix-AdvancementDataPlayer-leak-due-from-quitting-ear.patch} (98%)
rename patches/server/{0472-Add-missing-strikeLighting-call-to-World-spigot-stri.patch => 0460-Add-missing-strikeLighting-call-to-World-spigot-stri.patch} (83%)
rename patches/server/{0473-Fix-some-rails-connecting-improperly.patch => 0461-Fix-some-rails-connecting-improperly.patch} (98%)
rename patches/server/{0474-Fix-regex-mistake-in-CB-NBT-int-deserialization.patch => 0462-Fix-regex-mistake-in-CB-NBT-int-deserialization.patch} (100%)
rename patches/server/{0475-Do-not-let-the-server-load-chunks-from-newer-version.patch => 0463-Do-not-let-the-server-load-chunks-from-newer-version.patch} (93%)
rename patches/server/{0476-Brand-support.patch => 0464-Brand-support.patch} (96%)
rename patches/server/{0477-Add-setMaxPlayers-API.patch => 0465-Add-setMaxPlayers-API.patch} (89%)
rename patches/server/{0478-Add-playPickupItemAnimation-to-LivingEntity.patch => 0466-Add-playPickupItemAnimation-to-LivingEntity.patch} (100%)
rename patches/server/{0479-Don-t-require-FACING-data.patch => 0467-Don-t-require-FACING-data.patch} (100%)
rename patches/server/{0480-Fix-SpawnChangeEvent-not-firing-for-all-use-cases.patch => 0468-Fix-SpawnChangeEvent-not-firing-for-all-use-cases.patch} (89%)
rename patches/server/{0481-Add-moon-phase-API.patch => 0469-Add-moon-phase-API.patch} (100%)
rename patches/server/{0483-Prevent-headless-pistons-from-being-created.patch => 0470-Prevent-headless-pistons-from-being-created.patch} (100%)
rename patches/server/{0484-Add-BellRingEvent.patch => 0471-Add-BellRingEvent.patch} (100%)
rename patches/server/{0485-Add-zombie-targets-turtle-egg-config.patch => 0472-Add-zombie-targets-turtle-egg-config.patch} (100%)
rename patches/server/{0486-Buffer-joins-to-world.patch => 0473-Buffer-joins-to-world.patch} (92%)
rename patches/server/{0487-Eigencraft-redstone-implementation.patch => 0474-Eigencraft-redstone-implementation.patch} (100%)
rename patches/server/{0488-Fix-hex-colors-not-working-in-some-kick-messages.patch => 0475-Fix-hex-colors-not-working-in-some-kick-messages.patch} (100%)
rename patches/server/{0489-PortalCreateEvent-needs-to-know-its-entity.patch => 0476-PortalCreateEvent-needs-to-know-its-entity.patch} (98%)
rename patches/server/{0490-Fix-CraftTeam-null-check.patch => 0477-Fix-CraftTeam-null-check.patch} (100%)
rename patches/server/{0491-Add-more-Evoker-API.patch => 0478-Add-more-Evoker-API.patch} (100%)
rename patches/server/{0492-Add-methods-to-get-translation-keys.patch => 0479-Add-methods-to-get-translation-keys.patch} (100%)
rename patches/server/{0493-Create-HoverEvent-from-ItemStack-Entity.patch => 0480-Create-HoverEvent-from-ItemStack-Entity.patch} (100%)
rename patches/server/{0494-Cache-block-data-strings.patch => 0481-Cache-block-data-strings.patch} (95%)
rename patches/server/{0495-Fix-Entity-Teleportation-and-cancel-velocity-if-tele.patch => 0482-Fix-Entity-Teleportation-and-cancel-velocity-if-tele.patch} (96%)
rename patches/server/{0496-Add-additional-open-container-api-to-HumanEntity.patch => 0483-Add-additional-open-container-api-to-HumanEntity.patch} (100%)
rename patches/server/{0497-Cache-DataFixerUpper-Rewrite-Rules-on-demand.patch => 0484-Cache-DataFixerUpper-Rewrite-Rules-on-demand.patch} (100%)
rename patches/server/{0498-Extend-block-drop-capture-to-capture-all-items-added.patch => 0485-Extend-block-drop-capture-to-capture-all-items-added.patch} (94%)
rename patches/server/{0500-Expose-the-Entity-Counter-to-allow-plugins-to-use-va.patch => 0486-Expose-the-Entity-Counter-to-allow-plugins-to-use-va.patch} (90%)
rename patches/server/{0501-Lazily-track-plugin-scoreboards-by-default.patch => 0487-Lazily-track-plugin-scoreboards-by-default.patch} (100%)
rename patches/server/{0502-Entity-isTicking.patch => 0488-Entity-isTicking.patch} (91%)
rename patches/server/{0503-Fix-deop-kicking-non-whitelisted-player-when-white-l.patch => 0489-Fix-deop-kicking-non-whitelisted-player-when-white-l.patch} (90%)
rename patches/server/{0504-Fix-Concurrency-issue-in-ShufflingList.patch => 0490-Fix-Concurrency-issue-in-ShufflingList.patch} (100%)
rename patches/server/{0505-Reset-Ender-Crystals-on-Dragon-Spawn.patch => 0491-Reset-Ender-Crystals-on-Dragon-Spawn.patch} (100%)
rename patches/server/{0506-Fix-for-large-move-vectors-crashing-server.patch => 0492-Fix-for-large-move-vectors-crashing-server.patch} (99%)
rename patches/server/{0507-Optimise-getType-calls.patch => 0493-Optimise-getType-calls.patch} (98%)
rename patches/server/{0508-Villager-resetOffers.patch => 0494-Villager-resetOffers.patch} (100%)
rename patches/server/{0509-Improve-inlinig-for-some-hot-IBlockData-methods.patch => 0495-Improve-inlinig-for-some-hot-IBlockData-methods.patch} (88%)
rename patches/server/{0510-Retain-block-place-order-when-capturing-blockstates.patch => 0496-Retain-block-place-order-when-capturing-blockstates.patch} (90%)
rename patches/server/{0511-Reduce-blockpos-allocation-from-pathfinding.patch => 0497-Reduce-blockpos-allocation-from-pathfinding.patch} (96%)
rename patches/server/{0512-Fix-item-locations-dropped-from-campfires.patch => 0498-Fix-item-locations-dropped-from-campfires.patch} (100%)
delete mode 100644 patches/server/0499-Don-t-mark-dirty-in-invalid-locations-SPIGOT-6086.patch
rename patches/server/{0513-Player-elytra-boost-API.patch => 0499-Player-elytra-boost-API.patch} (90%)
rename patches/server/{0514-Fixed-TileEntityBell-memory-leak.patch => 0500-Fixed-TileEntityBell-memory-leak.patch} (100%)
rename patches/server/{0515-Avoid-error-bubbling-up-when-item-stack-is-empty-in-.patch => 0501-Avoid-error-bubbling-up-when-item-stack-is-empty-in-.patch} (100%)
rename patches/server/{0516-Add-getOfflinePlayerIfCached-String.patch => 0502-Add-getOfflinePlayerIfCached-String.patch} (93%)
rename patches/server/{0517-Add-ignore-discounts-API.patch => 0503-Add-ignore-discounts-API.patch} (100%)
rename patches/server/{0518-Toggle-for-removing-existing-dragon.patch => 0504-Toggle-for-removing-existing-dragon.patch} (100%)
rename patches/server/{0519-Fix-client-lag-on-advancement-loading.patch => 0505-Fix-client-lag-on-advancement-loading.patch} (100%)
rename patches/server/{0520-Item-no-age-no-player-pickup.patch => 0506-Item-no-age-no-player-pickup.patch} (100%)
rename patches/server/{0521-Optimize-Pathfinder-Remove-Streams-Optimized-collect.patch => 0507-Optimize-Pathfinder-Remove-Streams-Optimized-collect.patch} (100%)
rename patches/server/{0522-Beacon-API-custom-effect-ranges.patch => 0508-Beacon-API-custom-effect-ranges.patch} (100%)
rename patches/server/{0523-Add-API-for-quit-reason.patch => 0509-Add-API-for-quit-reason.patch} (92%)
rename patches/server/{0524-Add-Wandering-Trader-spawn-rate-config-options.patch => 0510-Add-Wandering-Trader-spawn-rate-config-options.patch} (100%)
rename patches/server/{0525-Expose-world-spawn-angle.patch => 0511-Expose-world-spawn-angle.patch} (93%)
rename patches/server/{0526-Add-Destroy-Speed-API.patch => 0512-Add-Destroy-Speed-API.patch} (100%)
rename patches/server/{0527-Fix-Player-spawnParticle-x-y-z-precision-loss.patch => 0513-Fix-Player-spawnParticle-x-y-z-precision-loss.patch} (89%)
rename patches/server/{0528-Add-LivingEntity-clearActiveItem.patch => 0514-Add-LivingEntity-clearActiveItem.patch} (100%)
rename patches/server/{0529-Add-PlayerItemCooldownEvent.patch => 0515-Add-PlayerItemCooldownEvent.patch} (100%)
rename patches/server/{0530-Significantly-improve-performance-of-the-end-generat.patch => 0516-Significantly-improve-performance-of-the-end-generat.patch} (100%)
rename patches/server/{0531-More-lightning-API.patch => 0517-More-lightning-API.patch} (100%)
rename patches/server/{0532-Climbing-should-not-bypass-cramming-gamerule.patch => 0518-Climbing-should-not-bypass-cramming-gamerule.patch} (98%)
rename patches/server/{0533-Added-missing-default-perms-for-commands.patch => 0519-Added-missing-default-perms-for-commands.patch} (100%)
rename patches/server/{0534-Add-PlayerShearBlockEvent.patch => 0520-Add-PlayerShearBlockEvent.patch} (100%)
rename patches/server/{0535-Fix-curing-zombie-villager-discount-exploit.patch => 0521-Fix-curing-zombie-villager-discount-exploit.patch} (100%)
rename patches/server/{0536-Limit-recipe-packets.patch => 0522-Limit-recipe-packets.patch} (100%)
rename patches/server/{0537-Fix-CraftSound-backwards-compatibility.patch => 0523-Fix-CraftSound-backwards-compatibility.patch} (100%)
rename patches/server/{0538-Player-Chunk-Load-Unload-Events.patch => 0524-Player-Chunk-Load-Unload-Events.patch} (90%)
rename patches/server/{0539-Optimize-Dynamic-get-Missing-Keys.patch => 0525-Optimize-Dynamic-get-Missing-Keys.patch} (100%)
rename patches/server/{0540-Expose-LivingEntity-hurt-direction.patch => 0526-Expose-LivingEntity-hurt-direction.patch} (100%)
rename patches/server/{0541-Add-OBSTRUCTED-reason-to-BedEnterResult.patch => 0527-Add-OBSTRUCTED-reason-to-BedEnterResult.patch} (100%)
rename patches/server/{0542-Do-not-crash-from-invalid-ingredient-lists-in-Villag.patch => 0528-Do-not-crash-from-invalid-ingredient-lists-in-Villag.patch} (100%)
rename patches/server/{0543-Add-PlayerTradeEvent-and-PlayerPurchaseEvent.patch => 0529-Add-PlayerTradeEvent-and-PlayerPurchaseEvent.patch} (100%)
rename patches/server/{0544-Implement-TargetHitEvent.patch => 0530-Implement-TargetHitEvent.patch} (100%)
rename patches/server/{0545-MC-4-Fix-item-position-desync.patch => 0531-MC-4-Fix-item-position-desync.patch} (88%)
rename patches/server/{0546-Additional-Block-Material-API-s.patch => 0532-Additional-Block-Material-API-s.patch} (100%)
rename patches/server/{0547-Fix-harming-potion-dupe.patch => 0533-Fix-harming-potion-dupe.patch} (100%)
rename patches/server/{0548-Implement-API-to-get-Material-from-Boats-and-Minecar.patch => 0534-Implement-API-to-get-Material-from-Boats-and-Minecar.patch} (100%)
rename patches/server/{0549-Cache-burn-durations.patch => 0535-Cache-burn-durations.patch} (100%)
rename patches/server/{0550-Allow-disabling-mob-spawner-spawn-egg-transformation.patch => 0536-Allow-disabling-mob-spawner-spawn-egg-transformation.patch} (100%)
rename patches/server/{0551-Fix-Not-a-string-Map-Conversion-spam.patch => 0537-Fix-Not-a-string-Map-Conversion-spam.patch} (100%)
rename patches/server/{0552-Implement-PlayerFlowerPotManipulateEvent.patch => 0538-Implement-PlayerFlowerPotManipulateEvent.patch} (100%)
rename patches/server/{0553-Fix-interact-event-not-being-called-in-adventure.patch => 0539-Fix-interact-event-not-being-called-in-adventure.patch} (100%)
rename patches/server/{0554-Zombie-API-breaking-doors.patch => 0540-Zombie-API-breaking-doors.patch} (100%)
rename patches/server/{0555-Fix-nerfed-slime-when-splitting.patch => 0541-Fix-nerfed-slime-when-splitting.patch} (100%)
rename patches/server/{0556-Add-EntityLoadCrossbowEvent.patch => 0542-Add-EntityLoadCrossbowEvent.patch} (100%)
rename patches/server/{0557-Guardian-beam-workaround.patch => 0543-Guardian-beam-workaround.patch} (100%)
rename patches/server/{0558-Added-WorldGameRuleChangeEvent.patch => 0544-Added-WorldGameRuleChangeEvent.patch} (96%)
rename patches/server/{0559-Added-ServerResourcesReloadedEvent.patch => 0545-Added-ServerResourcesReloadedEvent.patch} (93%)
rename patches/server/{0560-Added-world-settings-for-mobs-picking-up-loot.patch => 0546-Added-world-settings-for-mobs-picking-up-loot.patch} (100%)
rename patches/server/{0561-Implemented-BlockFailedDispenseEvent.patch => 0547-Implemented-BlockFailedDispenseEvent.patch} (100%)
rename patches/server/{0562-Added-PlayerLecternPageChangeEvent.patch => 0548-Added-PlayerLecternPageChangeEvent.patch} (100%)
rename patches/server/{0563-Added-PlayerLoomPatternSelectEvent.patch => 0549-Added-PlayerLoomPatternSelectEvent.patch} (100%)
rename patches/server/{0564-Configurable-door-breaking-difficulty.patch => 0550-Configurable-door-breaking-difficulty.patch} (100%)
rename patches/server/{0565-Empty-commands-shall-not-be-dispatched.patch => 0551-Empty-commands-shall-not-be-dispatched.patch} (100%)
rename patches/server/{0566-Implement-API-to-expose-exact-interaction-point.patch => 0552-Implement-API-to-expose-exact-interaction-point.patch} (100%)
rename patches/server/{0567-Remove-stale-POIs.patch => 0553-Remove-stale-POIs.patch} (86%)
rename patches/server/{0568-Fix-villager-boat-exploit.patch => 0554-Fix-villager-boat-exploit.patch} (93%)
rename patches/server/{0569-Add-sendOpLevel-API.patch => 0555-Add-sendOpLevel-API.patch} (89%)
rename patches/server/{0570-Add-PaperRegistry.patch => 0556-Add-PaperRegistry.patch} (98%)
rename patches/server/{0571-Add-StructuresLocateEvent.patch => 0557-Add-StructuresLocateEvent.patch} (99%)
rename patches/server/{0572-Collision-option-for-requiring-a-player-participant.patch => 0558-Collision-option-for-requiring-a-player-participant.patch} (94%)
rename patches/server/{0573-Remove-ProjectileHitEvent-call-when-fireballs-dead.patch => 0559-Remove-ProjectileHitEvent-call-when-fireballs-dead.patch} (100%)
rename patches/server/{0574-Return-chat-component-with-empty-text-instead-of-thr.patch => 0560-Return-chat-component-with-empty-text-instead-of-thr.patch} (100%)
rename patches/server/{0575-Make-schedule-command-per-world.patch => 0561-Make-schedule-command-per-world.patch} (100%)
rename patches/server/{0576-Configurable-max-leash-distance.patch => 0562-Configurable-max-leash-distance.patch} (100%)
rename patches/server/{0577-Implement-BlockPreDispenseEvent.patch => 0563-Implement-BlockPreDispenseEvent.patch} (100%)
rename patches/server/{0578-Added-firing-of-PlayerChangeBeaconEffectEvent.patch => 0564-Added-firing-of-PlayerChangeBeaconEffectEvent.patch} (100%)
rename patches/server/{0579-Add-toggle-for-always-placing-the-dragon-egg.patch => 0565-Add-toggle-for-always-placing-the-dragon-egg.patch} (100%)
rename patches/server/{0580-Added-PlayerStonecutterRecipeSelectEvent.patch => 0566-Added-PlayerStonecutterRecipeSelectEvent.patch} (100%)
rename patches/server/{0581-Add-dropLeash-variable-to-EntityUnleashEvent.patch => 0567-Add-dropLeash-variable-to-EntityUnleashEvent.patch} (100%)
rename patches/server/{0582-Reset-shield-blocking-on-dimension-change.patch => 0568-Reset-shield-blocking-on-dimension-change.patch} (83%)
rename patches/server/{0583-add-DragonEggFormEvent.patch => 0569-add-DragonEggFormEvent.patch} (100%)
rename patches/server/{0584-EntityMoveEvent.patch => 0570-EntityMoveEvent.patch} (93%)
rename patches/server/{0585-added-option-to-disable-pathfinding-updates-on-block.patch => 0571-added-option-to-disable-pathfinding-updates-on-block.patch} (80%)
rename patches/server/{0586-Inline-shift-direction-fields.patch => 0572-Inline-shift-direction-fields.patch} (100%)
rename patches/server/{0587-Allow-adding-items-to-BlockDropItemEvent.patch => 0573-Allow-adding-items-to-BlockDropItemEvent.patch} (100%)
rename patches/server/{0588-Add-getMainThreadExecutor-to-BukkitScheduler.patch => 0574-Add-getMainThreadExecutor-to-BukkitScheduler.patch} (100%)
rename patches/server/{0589-living-entity-allow-attribute-registration.patch => 0575-living-entity-allow-attribute-registration.patch} (100%)
rename patches/server/{0590-fix-dead-slime-setSize-invincibility.patch => 0576-fix-dead-slime-setSize-invincibility.patch} (100%)
rename patches/server/{0591-Merchant-getRecipes-should-return-an-immutable-list.patch => 0577-Merchant-getRecipes-should-return-an-immutable-list.patch} (100%)
rename patches/server/{0592-Add-support-for-hex-color-codes-in-console.patch => 0578-Add-support-for-hex-color-codes-in-console.patch} (99%)
rename patches/server/{0593-Expose-Tracked-Players.patch => 0579-Expose-Tracked-Players.patch} (100%)
rename patches/server/{0594-Remove-streams-from-SensorNearest.patch => 0580-Remove-streams-from-SensorNearest.patch} (100%)
rename patches/server/{0595-Throw-proper-exception-on-empty-JsonList-file.patch => 0581-Throw-proper-exception-on-empty-JsonList-file.patch} (100%)
rename patches/server/{0596-Improve-ServerGUI.patch => 0582-Improve-ServerGUI.patch} (100%)
rename patches/server/{0597-stop-firing-pressure-plate-EntityInteractEvent-for-i.patch => 0583-stop-firing-pressure-plate-EntityInteractEvent-for-i.patch} (100%)
rename patches/server/{0598-fix-converting-txt-to-json-file.patch => 0584-fix-converting-txt-to-json-file.patch} (93%)
rename patches/server/{0599-Add-worldborder-events.patch => 0585-Add-worldborder-events.patch} (100%)
rename patches/server/{0600-added-PlayerNameEntityEvent.patch => 0586-added-PlayerNameEntityEvent.patch} (100%)
rename patches/server/{0601-Prevent-grindstones-from-overstacking-items.patch => 0587-Prevent-grindstones-from-overstacking-items.patch} (100%)
rename patches/server/{0602-Add-recipe-to-cook-events.patch => 0588-Add-recipe-to-cook-events.patch} (100%)
rename patches/server/{0603-Add-Block-isValidTool.patch => 0589-Add-Block-isValidTool.patch} (100%)
rename patches/server/{0604-Allow-using-signs-inside-spawn-protection.patch => 0590-Allow-using-signs-inside-spawn-protection.patch} (100%)
rename patches/server/{0605-Expand-world-key-API.patch => 0591-Expand-world-key-API.patch} (97%)
rename patches/server/{0606-Add-fast-alternative-constructor-for-Rotations.patch => 0592-Add-fast-alternative-constructor-for-Rotations.patch} (100%)
rename patches/server/{0607-Item-Rarity-API.patch => 0593-Item-Rarity-API.patch} (100%)
rename patches/server/{0608-Only-set-despawnTimer-for-Wandering-Traders-spawned-.patch => 0594-Only-set-despawnTimer-for-Wandering-Traders-spawned-.patch} (100%)
rename patches/server/{0609-copy-TESign-isEditable-from-snapshots.patch => 0595-copy-TESign-isEditable-from-snapshots.patch} (100%)
rename patches/server/{0610-Drop-carried-item-when-player-has-disconnected.patch => 0596-Drop-carried-item-when-player-has-disconnected.patch} (92%)
rename patches/server/{0611-forced-whitelist-use-configurable-kick-message.patch => 0597-forced-whitelist-use-configurable-kick-message.patch} (87%)
rename patches/server/{0612-Don-t-ignore-result-of-PlayerEditBookEvent.patch => 0598-Don-t-ignore-result-of-PlayerEditBookEvent.patch} (100%)
rename patches/server/{0613-Entity-load-save-limit-per-chunk.patch => 0599-Entity-load-save-limit-per-chunk.patch} (68%)
rename patches/server/{0614-Expose-protocol-version.patch => 0600-Expose-protocol-version.patch} (100%)
rename patches/server/{0615-Enhance-console-tab-completions-for-brigadier-comman.patch => 0601-Enhance-console-tab-completions-for-brigadier-comman.patch} (99%)
rename patches/server/{0616-Fix-PlayerItemConsumeEvent-cancelling-properly.patch => 0602-Fix-PlayerItemConsumeEvent-cancelling-properly.patch} (100%)
rename patches/server/{0617-Add-bypass-host-check.patch => 0603-Add-bypass-host-check.patch} (100%)
rename patches/server/{0618-Set-area-affect-cloud-rotation.patch => 0604-Set-area-affect-cloud-rotation.patch} (100%)
rename patches/server/{0619-add-isDeeplySleeping-to-HumanEntity.patch => 0605-add-isDeeplySleeping-to-HumanEntity.patch} (100%)
rename patches/server/{0620-add-consumeFuel-to-FurnaceBurnEvent.patch => 0606-add-consumeFuel-to-FurnaceBurnEvent.patch} (100%)
rename patches/server/{0621-add-get-set-drop-chance-to-EntityEquipment.patch => 0607-add-get-set-drop-chance-to-EntityEquipment.patch} (100%)
rename patches/server/{0622-fix-PigZombieAngerEvent-cancellation.patch => 0608-fix-PigZombieAngerEvent-cancellation.patch} (100%)
rename patches/server/{0623-Fix-checkReach-check-for-Shulker-boxes.patch => 0609-Fix-checkReach-check-for-Shulker-boxes.patch} (100%)
rename patches/server/{0624-fix-PlayerItemHeldEvent-firing-twice.patch => 0610-fix-PlayerItemHeldEvent-firing-twice.patch} (100%)
rename patches/server/{0625-Added-PlayerDeepSleepEvent.patch => 0611-Added-PlayerDeepSleepEvent.patch} (100%)
rename patches/server/{0626-More-World-API.patch => 0612-More-World-API.patch} (96%)
rename patches/server/{0627-Added-PlayerBedFailEnterEvent.patch => 0613-Added-PlayerBedFailEnterEvent.patch} (100%)
rename patches/server/{0628-Implement-methods-to-convert-between-Component-and-B.patch => 0614-Implement-methods-to-convert-between-Component-and-B.patch} (96%)
rename patches/server/{0629-Fix-anchor-respawn-acting-as-a-bed-respawn-from-the-.patch => 0615-Fix-anchor-respawn-acting-as-a-bed-respawn-from-the-.patch} (95%)
rename patches/server/{0630-Introduce-beacon-activation-deactivation-events.patch => 0616-Introduce-beacon-activation-deactivation-events.patch} (100%)
rename patches/server/{0631-add-RespawnFlags-to-PlayerRespawnEvent.patch => 0617-add-RespawnFlags-to-PlayerRespawnEvent.patch} (97%)
rename patches/server/{0632-Add-Channel-initialization-listeners.patch => 0618-Add-Channel-initialization-listeners.patch} (96%)
rename patches/server/{0633-Send-empty-commands-if-tab-completion-is-disabled.patch => 0619-Send-empty-commands-if-tab-completion-is-disabled.patch} (100%)
rename patches/server/{0634-Add-more-WanderingTrader-API.patch => 0620-Add-more-WanderingTrader-API.patch} (100%)
rename patches/server/{0635-Add-EntityBlockStorage-clearEntities.patch => 0621-Add-EntityBlockStorage-clearEntities.patch} (100%)
rename patches/server/{0636-Add-Adventure-message-to-PlayerAdvancementDoneEvent.patch => 0622-Add-Adventure-message-to-PlayerAdvancementDoneEvent.patch} (100%)
rename patches/server/{0637-Add-raw-address-to-AsyncPlayerPreLoginEvent.patch => 0623-Add-raw-address-to-AsyncPlayerPreLoginEvent.patch} (100%)
rename patches/server/{0638-Inventory-close.patch => 0624-Inventory-close.patch} (100%)
rename patches/server/{0639-call-PortalCreateEvent-players-and-end-platform.patch => 0625-call-PortalCreateEvent-players-and-end-platform.patch} (91%)
rename patches/server/{0640-Add-a-should-burn-in-sunlight-API-for-Phantoms-and-S.patch => 0626-Add-a-should-burn-in-sunlight-API-for-Phantoms-and-S.patch} (100%)
rename patches/server/{0641-Fix-CraftPotionBrewer-cache.patch => 0627-Fix-CraftPotionBrewer-cache.patch} (100%)
rename patches/server/{0642-Add-basic-Datapack-API.patch => 0628-Add-basic-Datapack-API.patch} (98%)
rename patches/server/{0643-Add-environment-variable-to-disable-server-gui.patch => 0629-Add-environment-variable-to-disable-server-gui.patch} (100%)
rename patches/server/{0644-additions-to-PlayerGameModeChangeEvent.patch => 0630-additions-to-PlayerGameModeChangeEvent.patch} (95%)
rename patches/server/{0645-ItemStack-repair-check-API.patch => 0631-ItemStack-repair-check-API.patch} (100%)
rename patches/server/{0646-More-Enchantment-API.patch => 0632-More-Enchantment-API.patch} (100%)
rename patches/server/{0647-Move-range-check-for-block-placing-up.patch => 0633-Move-range-check-for-block-placing-up.patch} (100%)
rename patches/server/{0648-Fix-and-optimise-world-force-upgrading.patch => 0634-Fix-and-optimise-world-force-upgrading.patch} (98%)
rename patches/server/{0649-Add-Mob-lookAt-API.patch => 0635-Add-Mob-lookAt-API.patch} (100%)
rename patches/server/{0650-Add-Unix-domain-socket-support.patch => 0636-Add-Unix-domain-socket-support.patch} (97%)
rename patches/server/{0651-Add-EntityInsideBlockEvent.patch => 0637-Add-EntityInsideBlockEvent.patch} (100%)
rename patches/server/{0652-Attributes-API-for-item-defaults.patch => 0638-Attributes-API-for-item-defaults.patch} (100%)
rename patches/server/{0653-Add-cause-to-Weather-ThunderChangeEvents.patch => 0639-Add-cause-to-Weather-ThunderChangeEvents.patch} (92%)
rename patches/server/{0654-More-Lidded-Block-API.patch => 0640-More-Lidded-Block-API.patch} (100%)
rename patches/server/{0655-Limit-item-frame-cursors-on-maps.patch => 0641-Limit-item-frame-cursors-on-maps.patch} (100%)
rename patches/server/{0656-Add-PlayerKickEvent-causes.patch => 0642-Add-PlayerKickEvent-causes.patch} (98%)
rename patches/server/{0657-Add-PufferFishStateChangeEvent.patch => 0643-Add-PufferFishStateChangeEvent.patch} (100%)
rename patches/server/{0658-Fix-PlayerBucketEmptyEvent-result-itemstack.patch => 0644-Fix-PlayerBucketEmptyEvent-result-itemstack.patch} (100%)
rename patches/server/{0659-Synchronize-PalettedContainer-instead-of-ThreadingDe.patch => 0645-Synchronize-PalettedContainer-instead-of-ThreadingDe.patch} (97%)
rename patches/server/{0660-Add-option-to-fix-items-merging-through-walls.patch => 0646-Add-option-to-fix-items-merging-through-walls.patch} (100%)
rename patches/server/{0661-Add-BellRevealRaiderEvent.patch => 0647-Add-BellRevealRaiderEvent.patch} (100%)
rename patches/server/{0662-Fix-invulnerable-end-crystals.patch => 0648-Fix-invulnerable-end-crystals.patch} (100%)
rename patches/server/{0663-Add-ElderGuardianAppearanceEvent.patch => 0649-Add-ElderGuardianAppearanceEvent.patch} (100%)
rename patches/server/{0664-Fix-dangerous-end-portal-logic.patch => 0650-Fix-dangerous-end-portal-logic.patch} (94%)
rename patches/server/{0665-Optimize-Biome-Mob-Lookups-for-Mob-Spawning.patch => 0651-Optimize-Biome-Mob-Lookups-for-Mob-Spawning.patch} (100%)
rename patches/server/{0666-Make-item-validations-configurable.patch => 0652-Make-item-validations-configurable.patch} (100%)
rename patches/server/{0667-Line-Of-Sight-Changes.patch => 0653-Line-Of-Sight-Changes.patch} (100%)
rename patches/server/{0668-add-per-world-spawn-limits.patch => 0654-add-per-world-spawn-limits.patch} (91%)
rename patches/server/{0669-Fix-PotionSplashEvent-for-water-splash-potions.patch => 0655-Fix-PotionSplashEvent-for-water-splash-potions.patch} (100%)
rename patches/server/{0670-Add-more-LimitedRegion-API.patch => 0656-Add-more-LimitedRegion-API.patch} (100%)
rename patches/server/{0671-Fix-PlayerDropItemEvent-using-wrong-item.patch => 0657-Fix-PlayerDropItemEvent-using-wrong-item.patch} (91%)
rename patches/server/{0672-Missing-Entity-Behavior-API.patch => 0658-Missing-Entity-Behavior-API.patch} (99%)
rename patches/server/{0673-Ensure-disconnect-for-book-edit-is-called-on-main.patch => 0659-Ensure-disconnect-for-book-edit-is-called-on-main.patch} (100%)
rename patches/server/{0674-Fix-return-value-of-Block-applyBoneMeal-always-being.patch => 0660-Fix-return-value-of-Block-applyBoneMeal-always-being.patch} (100%)
rename patches/server/{0675-Use-getChunkIfLoadedImmediately-in-places.patch => 0661-Use-getChunkIfLoadedImmediately-in-places.patch} (90%)
rename patches/server/{0676-Fix-commands-from-signs-not-firing-command-events.patch => 0662-Fix-commands-from-signs-not-firing-command-events.patch} (100%)
rename patches/server/{0677-Adds-PlayerArmSwingEvent.patch => 0663-Adds-PlayerArmSwingEvent.patch} (100%)
rename patches/server/{0678-Fixes-kick-event-leave-message-not-being-sent.patch => 0664-Fixes-kick-event-leave-message-not-being-sent.patch} (95%)
rename patches/server/{0679-Add-config-for-mobs-immune-to-default-effects.patch => 0665-Add-config-for-mobs-immune-to-default-effects.patch} (95%)
rename patches/server/{0680-Fix-incorrect-message-for-outdated-client.patch => 0666-Fix-incorrect-message-for-outdated-client.patch} (100%)
rename patches/server/{0681-Don-t-apply-cramming-damage-to-players.patch => 0667-Don-t-apply-cramming-damage-to-players.patch} (89%)
rename patches/server/{0682-Rate-options-and-timings-for-sensors-and-behaviors.patch => 0668-Rate-options-and-timings-for-sensors-and-behaviors.patch} (100%)
rename patches/server/{0683-Add-a-bunch-of-missing-forceDrop-toggles.patch => 0669-Add-a-bunch-of-missing-forceDrop-toggles.patch} (100%)
rename patches/server/{0684-Stinger-API.patch => 0670-Stinger-API.patch} (100%)
rename patches/server/{0685-Fix-incosistency-issue-with-empty-map-items-in-CB.patch => 0671-Fix-incosistency-issue-with-empty-map-items-in-CB.patch} (100%)
rename patches/server/{0686-Add-System.out-err-catcher.patch => 0672-Add-System.out-err-catcher.patch} (98%)
rename patches/server/{0687-Fix-test-not-bootstrapping.patch => 0673-Fix-test-not-bootstrapping.patch} (100%)
rename patches/server/{0688-Rewrite-LogEvents-to-contain-the-source-jars-in-stac.patch => 0674-Rewrite-LogEvents-to-contain-the-source-jars-in-stac.patch} (100%)
rename patches/server/{0689-Improve-boat-collision-performance.patch => 0675-Improve-boat-collision-performance.patch} (98%)
rename patches/server/{0690-Prevent-AFK-kick-while-watching-end-credits.patch => 0676-Prevent-AFK-kick-while-watching-end-credits.patch} (100%)
rename patches/server/{0691-Allow-skipping-writing-of-comments-to-server.propert.patch => 0677-Allow-skipping-writing-of-comments-to-server.propert.patch} (100%)
rename patches/server/{0692-Add-PlayerSetSpawnEvent.patch => 0678-Add-PlayerSetSpawnEvent.patch} (94%)
rename patches/server/{0693-Make-hoppers-respect-inventory-max-stack-size.patch => 0679-Make-hoppers-respect-inventory-max-stack-size.patch} (100%)
rename patches/server/{0694-Optimize-entity-tracker-passenger-checks.patch => 0680-Optimize-entity-tracker-passenger-checks.patch} (100%)
rename patches/server/{0695-Config-option-for-Piglins-guarding-chests.patch => 0681-Config-option-for-Piglins-guarding-chests.patch} (100%)
rename patches/server/{0696-Added-EntityDamageItemEvent.patch => 0682-Added-EntityDamageItemEvent.patch} (100%)
rename patches/server/{0697-Optimize-indirect-passenger-iteration.patch => 0683-Optimize-indirect-passenger-iteration.patch} (87%)
rename patches/server/{0698-Fix-block-drops-position-losing-precision-millions-o.patch => 0684-Fix-block-drops-position-losing-precision-millions-o.patch} (100%)
rename patches/server/{0699-Configurable-item-frame-map-cursor-update-interval.patch => 0685-Configurable-item-frame-map-cursor-update-interval.patch} (100%)
rename patches/server/{0700-Make-EntityUnleashEvent-cancellable.patch => 0686-Make-EntityUnleashEvent-cancellable.patch} (100%)
rename patches/server/{0701-Clear-bucket-NBT-after-dispense.patch => 0687-Clear-bucket-NBT-after-dispense.patch} (100%)
rename patches/server/{0702-Change-EnderEye-target-without-changing-other-things.patch => 0688-Change-EnderEye-target-without-changing-other-things.patch} (100%)
rename patches/server/{0703-Add-BlockBreakBlockEvent.patch => 0689-Add-BlockBreakBlockEvent.patch} (100%)
rename patches/server/{0704-Option-to-prevent-NBT-copy-in-smithing-recipes.patch => 0690-Option-to-prevent-NBT-copy-in-smithing-recipes.patch} (100%)
rename patches/server/{0705-More-CommandBlock-API.patch => 0691-More-CommandBlock-API.patch} (100%)
rename patches/server/{0706-Add-missing-team-sidebar-display-slots.patch => 0692-Add-missing-team-sidebar-display-slots.patch} (100%)
rename patches/server/{0707-Add-back-EntityPortalExitEvent.patch => 0693-Add-back-EntityPortalExitEvent.patch} (93%)
rename patches/server/{0708-Add-methods-to-find-targets-for-lightning-strikes.patch => 0694-Add-methods-to-find-targets-for-lightning-strikes.patch} (86%)
rename patches/server/{0709-Get-entity-default-attributes.patch => 0695-Get-entity-default-attributes.patch} (100%)
rename patches/server/{0710-Left-handed-API.patch => 0696-Left-handed-API.patch} (100%)
rename patches/server/{0711-Add-advancement-display-API.patch => 0697-Add-advancement-display-API.patch} (100%)
rename patches/server/{0712-Add-ItemFactory-getMonsterEgg-API.patch => 0698-Add-ItemFactory-getMonsterEgg-API.patch} (100%)
rename patches/server/{0713-Add-critical-damage-API.patch => 0699-Add-critical-damage-API.patch} (100%)
rename patches/server/{0714-Fix-issues-with-mob-conversion.patch => 0700-Fix-issues-with-mob-conversion.patch} (100%)
rename patches/server/{0715-Add-isCollidable-methods-to-various-places.patch => 0701-Add-isCollidable-methods-to-various-places.patch} (100%)
rename patches/server/{0716-Goat-ram-API.patch => 0702-Goat-ram-API.patch} (100%)
rename patches/server/{0717-Add-API-for-resetting-a-single-score.patch => 0703-Add-API-for-resetting-a-single-score.patch} (100%)
rename patches/server/{0718-Add-Raw-Byte-Entity-Serialization.patch => 0704-Add-Raw-Byte-Entity-Serialization.patch} (96%)
rename patches/server/{0719-Vanilla-command-permission-fixes.patch => 0705-Vanilla-command-permission-fixes.patch} (100%)
rename patches/server/{0721-Do-not-run-close-logic-for-inventories-on-chunk-unlo.patch => 0706-Do-not-run-close-logic-for-inventories-on-chunk-unlo.patch} (92%)
rename patches/server/{0723-Fix-GameProfileCache-concurrency.patch => 0707-Fix-GameProfileCache-concurrency.patch} (97%)
rename patches/server/{0727-Log-when-the-async-catcher-is-tripped.patch => 0708-Log-when-the-async-catcher-is-tripped.patch} (76%)
rename patches/server/{0728-Add-paper-mobcaps-and-paper-playermobcaps.patch => 0709-Add-paper-mobcaps-and-paper-playermobcaps.patch} (97%)
rename patches/server/{0730-Sanitize-ResourceLocation-error-logging.patch => 0710-Sanitize-ResourceLocation-error-logging.patch} (100%)
rename patches/server/{0731-Allow-controlled-flushing-for-network-manager.patch => 0711-Allow-controlled-flushing-for-network-manager.patch} (90%)
rename patches/server/{0732-Optimise-general-POI-access.patch => 0712-Optimise-general-POI-access.patch} (98%)
rename patches/server/{0735-Optimise-chunk-tick-iteration.patch => 0713-Optimise-chunk-tick-iteration.patch} (86%)
rename patches/server/{0736-Execute-chunk-tasks-mid-tick.patch => 0714-Execute-chunk-tasks-mid-tick.patch} (87%)
rename patches/server/{0737-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch => 0715-Attempt-to-recalculate-regionfile-header-if-it-is-co.patch} (98%)
rename patches/server/{0738-Custom-table-implementation-for-blockstate-state-loo.patch => 0716-Custom-table-implementation-for-blockstate-state-loo.patch} (100%)
rename patches/server/{0739-Detail-more-information-in-watchdog-dumps.patch => 0717-Detail-more-information-in-watchdog-dumps.patch} (91%)
rename patches/server/{0740-Manually-inline-methods-in-BlockPosition.patch => 0718-Manually-inline-methods-in-BlockPosition.patch} (97%)
rename patches/server/{0741-Distance-manager-tick-timings.patch => 0719-Distance-manager-tick-timings.patch} (53%)
rename patches/server/{0742-Name-craft-scheduler-threads-according-to-the-plugin.patch => 0720-Name-craft-scheduler-threads-according-to-the-plugin.patch} (96%)
rename patches/server/{0743-Make-sure-inlined-getChunkAt-has-inlined-logic-for-l.patch => 0721-Make-sure-inlined-getChunkAt-has-inlined-logic-for-l.patch} (87%)
rename patches/server/{0744-Add-packet-limiter-config.patch => 0722-Add-packet-limiter-config.patch} (94%)
rename patches/server/{0745-Use-correct-LevelStem-registry-when-loading-default-.patch => 0723-Use-correct-LevelStem-registry-when-loading-default-.patch} (96%)
rename patches/server/{0746-Don-t-read-neighbour-chunk-data-off-disk-when-conver.patch => 0724-Don-t-read-neighbour-chunk-data-off-disk-when-conver.patch} (93%)
rename patches/server/{0747-Consolidate-flush-calls-for-entity-tracker-packets.patch => 0725-Consolidate-flush-calls-for-entity-tracker-packets.patch} (91%)
rename patches/server/{0748-Don-t-lookup-fluid-state-when-raytracing.patch => 0726-Don-t-lookup-fluid-state-when-raytracing.patch} (95%)
rename patches/server/{0749-Time-scoreboard-search.patch => 0727-Time-scoreboard-search.patch} (97%)
rename patches/server/{0750-Send-full-pos-packets-for-hard-colliding-entities.patch => 0728-Send-full-pos-packets-for-hard-colliding-entities.patch} (100%)
rename patches/server/{0751-Do-not-run-raytrace-logic-for-AIR.patch => 0729-Do-not-run-raytrace-logic-for-AIR.patch} (100%)
rename patches/server/{0752-Oprimise-map-impl-for-tracked-players.patch => 0730-Oprimise-map-impl-for-tracked-players.patch} (69%)
rename patches/server/{0753-Optimise-BlockSoil-nearby-water-lookup.patch => 0731-Optimise-BlockSoil-nearby-water-lookup.patch} (100%)
rename patches/server/{0755-Optimise-random-block-ticking.patch => 0732-Optimise-random-block-ticking.patch} (97%)
delete mode 100644 patches/server/0733-Add-more-async-catchers.patch
rename patches/server/{0756-Optimise-non-flush-packet-sending.patch => 0733-Optimise-non-flush-packet-sending.patch} (92%)
rename patches/server/{0757-Optimise-nearby-player-lookups.patch => 0734-Optimise-nearby-player-lookups.patch} (90%)
rename patches/server/{0759-Remove-streams-for-villager-AI.patch => 0735-Remove-streams-for-villager-AI.patch} (99%)
rename patches/server/{0761-Use-Velocity-compression-and-cipher-natives.patch => 0736-Use-Velocity-compression-and-cipher-natives.patch} (98%)
rename patches/server/{0762-Reduce-worldgen-thread-worker-count-for-low-core-cou.patch => 0737-Reduce-worldgen-thread-worker-count-for-low-core-cou.patch} (100%)
rename patches/server/{0764-Async-catch-modifications-to-critical-entity-state.patch => 0738-Async-catch-modifications-to-critical-entity-state.patch} (78%)
rename patches/server/{0765-Fix-Bukkit-NamespacedKey-shenanigans.patch => 0739-Fix-Bukkit-NamespacedKey-shenanigans.patch} (100%)
rename patches/server/{0766-Fix-merchant-inventory-not-closing-on-entity-removal.patch => 0740-Fix-merchant-inventory-not-closing-on-entity-removal.patch} (90%)
rename patches/server/{0767-Check-requirement-before-suggesting-root-nodes.patch => 0741-Check-requirement-before-suggesting-root-nodes.patch} (100%)
rename patches/server/{0768-Don-t-respond-to-ServerboundCommandSuggestionPacket-.patch => 0742-Don-t-respond-to-ServerboundCommandSuggestionPacket-.patch} (100%)
rename patches/server/{0769-Fix-setPatternColor-on-tropical-fish-bucket-meta.patch => 0743-Fix-setPatternColor-on-tropical-fish-bucket-meta.patch} (100%)
rename patches/server/{0770-Ensure-valid-vehicle-status.patch => 0744-Ensure-valid-vehicle-status.patch} (84%)
rename patches/server/{0771-Prevent-softlocked-end-exit-portal-generation.patch => 0745-Prevent-softlocked-end-exit-portal-generation.patch} (100%)
rename patches/server/{0772-Fix-CocaoDecorator-causing-a-crash-when-trying-to-ge.patch => 0746-Fix-CocaoDecorator-causing-a-crash-when-trying-to-ge.patch} (100%)
rename patches/server/{0773-Don-t-log-debug-logging-being-disabled.patch => 0747-Don-t-log-debug-logging-being-disabled.patch} (100%)
rename patches/server/{0774-fix-various-menus-with-empty-level-accesses.patch => 0748-fix-various-menus-with-empty-level-accesses.patch} (100%)
rename patches/server/{0775-Preserve-overstacked-loot.patch => 0749-Preserve-overstacked-loot.patch} (100%)
rename patches/server/{0776-Update-head-rotation-in-missing-places.patch => 0750-Update-head-rotation-in-missing-places.patch} (84%)
rename patches/server/{0777-prevent-unintended-light-block-manipulation.patch => 0751-prevent-unintended-light-block-manipulation.patch} (100%)
rename patches/server/{0778-Fix-CraftCriteria-defaults-map.patch => 0752-Fix-CraftCriteria-defaults-map.patch} (100%)
rename patches/server/{0779-Fix-upstreams-block-state-factories.patch => 0753-Fix-upstreams-block-state-factories.patch} (100%)
rename patches/server/{0780-Add-config-option-for-logging-player-ip-addresses.patch => 0754-Add-config-option-for-logging-player-ip-addresses.patch} (98%)
rename patches/server/{0781-Configurable-feature-seeds.patch => 0755-Configurable-feature-seeds.patch} (91%)
rename patches/server/{0782-VanillaCommandWrapper-didnt-account-for-entity-sende.patch => 0756-VanillaCommandWrapper-didnt-account-for-entity-sende.patch} (100%)
rename patches/server/{0783-Add-root-admin-user-detection.patch => 0757-Add-root-admin-user-detection.patch} (97%)
rename patches/server/{0784-Always-allow-item-changing-in-Fireball.patch => 0758-Always-allow-item-changing-in-Fireball.patch} (100%)
delete mode 100644 patches/server/0758-Optimise-WorldServer-notify.patch
rename patches/server/{0785-don-t-attempt-to-teleport-dead-entities.patch => 0759-don-t-attempt-to-teleport-dead-entities.patch} (85%)
rename patches/server/{0786-Prevent-excessive-velocity-through-repeated-crits.patch => 0760-Prevent-excessive-velocity-through-repeated-crits.patch} (100%)
rename patches/server/{0787-Remove-client-side-code-using-deprecated-for-removal.patch => 0761-Remove-client-side-code-using-deprecated-for-removal.patch} (100%)
rename patches/server/{0789-Always-parse-protochunk-light-sources-unless-it-is-m.patch => 0762-Always-parse-protochunk-light-sources-unless-it-is-m.patch} (95%)
rename patches/server/{0790-Fix-removing-recipes-from-RecipeIterator.patch => 0763-Fix-removing-recipes-from-RecipeIterator.patch} (100%)
rename patches/server/{0791-Prevent-sending-oversized-item-data-in-equipment-and.patch => 0764-Prevent-sending-oversized-item-data-in-equipment-and.patch} (100%)
rename patches/server/{0792-Hide-unnecessary-itemmeta-from-clients.patch => 0765-Hide-unnecessary-itemmeta-from-clients.patch} (100%)
rename patches/server/{0793-Fix-kelp-modifier-changing-growth-for-other-crops.patch => 0766-Fix-kelp-modifier-changing-growth-for-other-crops.patch} (100%)
rename patches/server/{0794-Prevent-ContainerOpenersCounter-openCount-from-going.patch => 0767-Prevent-ContainerOpenersCounter-openCount-from-going.patch} (100%)
rename patches/server/{0795-Add-PlayerItemFrameChangeEvent.patch => 0768-Add-PlayerItemFrameChangeEvent.patch} (100%)
rename patches/server/{0796-Add-player-health-update-API.patch => 0769-Add-player-health-update-API.patch} (88%)
rename patches/server/{0797-Optimize-HashMapPalette.patch => 0770-Optimize-HashMapPalette.patch} (100%)
rename patches/server/{0798-Allow-delegation-to-vanilla-chunk-gen.patch => 0771-Allow-delegation-to-vanilla-chunk-gen.patch} (98%)
rename patches/server/{0799-Highly-optimise-single-and-multi-AABB-VoxelShapes-an.patch => 0772-Highly-optimise-single-and-multi-AABB-VoxelShapes-an.patch} (99%)
rename patches/server/{0800-Optimise-collision-checking-in-player-move-packet-ha.patch => 0773-Optimise-collision-checking-in-player-move-packet-ha.patch} (99%)
rename patches/server/{0802-Fix-ChunkSnapshot-isSectionEmpty-int-and-optimize-Pa.patch => 0774-Fix-ChunkSnapshot-isSectionEmpty-int-and-optimize-Pa.patch} (96%)
rename patches/server/{0803-Update-Log4j.patch => 0775-Update-Log4j.patch} (100%)
rename patches/server/{0804-Add-more-Campfire-API.patch => 0776-Add-more-Campfire-API.patch} (100%)
rename patches/server/{0805-Only-write-chunk-data-to-disk-if-it-serializes-witho.patch => 0777-Only-write-chunk-data-to-disk-if-it-serializes-witho.patch} (93%)
rename patches/server/{0806-Fix-tripwire-state-inconsistency.patch => 0778-Fix-tripwire-state-inconsistency.patch} (100%)
rename patches/server/{0807-Fix-fluid-logging-on-Block-breakNaturally.patch => 0779-Fix-fluid-logging-on-Block-breakNaturally.patch} (100%)
rename patches/server/{0808-Forward-CraftEntity-in-teleport-command.patch => 0780-Forward-CraftEntity-in-teleport-command.patch} (88%)
rename patches/server/{0809-Improve-scoreboard-entries.patch => 0781-Improve-scoreboard-entries.patch} (100%)
rename patches/server/{0810-Entity-powdered-snow-API.patch => 0782-Entity-powdered-snow-API.patch} (100%)
rename patches/server/{0811-Add-API-for-item-entity-health.patch => 0783-Add-API-for-item-entity-health.patch} (100%)
rename patches/server/{0812-Fix-entity-type-tags-suggestions-in-selectors.patch => 0784-Fix-entity-type-tags-suggestions-in-selectors.patch} (100%)
rename patches/server/{0813-Configurable-max-block-light-for-monster-spawning.patch => 0785-Configurable-max-block-light-for-monster-spawning.patch} (100%)
rename patches/server/{0814-Fix-sticky-pistons-and-BlockPistonRetractEvent.patch => 0786-Fix-sticky-pistons-and-BlockPistonRetractEvent.patch} (100%)
rename patches/server/{0815-Load-effect-amplifiers-greater-than-127-correctly.patch => 0787-Load-effect-amplifiers-greater-than-127-correctly.patch} (100%)
rename patches/server/{0816-Expose-isFuel-and-canSmelt-methods-to-FurnaceInvento.patch => 0788-Expose-isFuel-and-canSmelt-methods-to-FurnaceInvento.patch} (100%)
rename patches/server/{0817-Fix-bees-aging-inside-hives.patch => 0789-Fix-bees-aging-inside-hives.patch} (100%)
rename patches/server/{0818-Bucketable-API.patch => 0790-Bucketable-API.patch} (100%)
rename patches/server/{0819-Check-player-world-in-endPortalSoundRadius.patch => 0791-Check-player-world-in-endPortalSoundRadius.patch} (100%)
rename patches/server/{0820-Validate-usernames.patch => 0792-Validate-usernames.patch} (97%)
rename patches/server/{0821-Fix-saving-configs-with-more-long-comments.patch => 0793-Fix-saving-configs-with-more-long-comments.patch} (100%)
rename patches/server/{0822-Make-water-animal-spawn-height-configurable.patch => 0794-Make-water-animal-spawn-height-configurable.patch} (100%)
rename patches/server/{0823-Expose-vanilla-BiomeProvider-from-WorldInfo.patch => 0795-Expose-vanilla-BiomeProvider-from-WorldInfo.patch} (96%)
rename patches/server/{0824-Add-config-option-for-worlds-affected-by-time-cmd.patch => 0796-Add-config-option-for-worlds-affected-by-time-cmd.patch} (100%)
rename patches/server/{0825-Add-new-overload-to-PersistentDataContainer-has.patch => 0797-Add-new-overload-to-PersistentDataContainer-has.patch} (100%)
rename patches/server/{0826-Multiple-Entries-with-Scoreboards.patch => 0798-Multiple-Entries-with-Scoreboards.patch} (100%)
rename patches/server/{0827-Reset-placed-block-on-exception.patch => 0799-Reset-placed-block-on-exception.patch} (100%)
rename patches/server/{0828-Add-configurable-height-for-slime-spawn.patch => 0800-Add-configurable-height-for-slime-spawn.patch} (100%)
rename patches/server/{0829-Added-getHostname-to-AsyncPlayerPreLoginEvent.patch => 0801-Added-getHostname-to-AsyncPlayerPreLoginEvent.patch} (100%)
rename patches/server/{0830-Fix-xp-reward-for-baby-zombies.patch => 0802-Fix-xp-reward-for-baby-zombies.patch} (100%)
rename patches/server/{0831-Kick-on-main-for-illegal-chat.patch => 0803-Kick-on-main-for-illegal-chat.patch} (100%)
rename patches/server/{0832-Multi-Block-Change-API-Implementation.patch => 0804-Multi-Block-Change-API-Implementation.patch} (95%)
rename patches/server/{0833-Fix-NotePlayEvent.patch => 0805-Fix-NotePlayEvent.patch} (100%)
rename patches/server/{0834-Freeze-Tick-Lock-API.patch => 0806-Freeze-Tick-Lock-API.patch} (92%)
rename patches/server/{0835-Dolphin-API.patch => 0807-Dolphin-API.patch} (100%)
rename patches/server/{0836-More-PotionEffectType-API.patch => 0808-More-PotionEffectType-API.patch} (100%)
rename patches/server/{0837-Use-a-CHM-for-StructureTemplate.Pallete-cache.patch => 0809-Use-a-CHM-for-StructureTemplate.Pallete-cache.patch} (100%)
rename patches/server/{0838-API-for-creating-command-sender-which-forwards-feedb.patch => 0810-API-for-creating-command-sender-which-forwards-feedb.patch} (98%)
rename patches/server/{0839-Add-config-for-stronghold-seed.patch => 0811-Add-config-for-stronghold-seed.patch} (96%)
rename patches/server/{0840-Implement-regenerateChunk.patch => 0812-Implement-regenerateChunk.patch} (98%)
rename patches/server/{0841-Fix-cancelled-powdered-snow-bucket-placement.patch => 0813-Fix-cancelled-powdered-snow-bucket-placement.patch} (100%)
rename patches/server/{0842-Add-missing-Validate-calls-to-CraftServer-getSpawnLi.patch => 0814-Add-missing-Validate-calls-to-CraftServer-getSpawnLi.patch} (91%)
rename patches/server/{0843-Add-GameEvent-tags.patch => 0815-Add-GameEvent-tags.patch} (97%)
rename patches/server/{0844-Execute-chunk-tasks-fairly-for-worlds-while-waiting-.patch => 0816-Execute-chunk-tasks-fairly-for-worlds-while-waiting-.patch} (87%)
rename patches/server/{0846-Furnace-RecipesUsed-API.patch => 0817-Furnace-RecipesUsed-API.patch} (100%)
rename patches/server/{0847-Configurable-sculk-sensor-listener-range.patch => 0818-Configurable-sculk-sensor-listener-range.patch} (100%)
rename patches/server/{0848-Add-missing-block-data-mins-and-maxes.patch => 0819-Add-missing-block-data-mins-and-maxes.patch} (100%)
rename patches/server/{0849-Option-to-have-default-CustomSpawners-in-custom-worl.patch => 0820-Option-to-have-default-CustomSpawners-in-custom-worl.patch} (96%)
rename patches/server/{0850-Put-world-into-worldlist-before-initing-the-world.patch => 0821-Put-world-into-worldlist-before-initing-the-world.patch} (84%)
rename patches/server/{0851-Fix-Entity-Position-Desync.patch => 0822-Fix-Entity-Position-Desync.patch} (100%)
rename patches/server/{0852-Custom-Potion-Mixes.patch => 0823-Custom-Potion-Mixes.patch} (97%)
rename patches/server/{0854-Fix-Fluid-tags-isTagged-method.patch => 0824-Fix-Fluid-tags-isTagged-method.patch} (100%)
rename patches/server/{0855-Force-close-world-loading-screen.patch => 0825-Force-close-world-loading-screen.patch} (95%)
rename patches/server/{0856-Fix-falling-block-spawn-methods.patch => 0826-Fix-falling-block-spawn-methods.patch} (93%)
rename patches/server/{0857-Expose-furnace-minecart-push-values.patch => 0827-Expose-furnace-minecart-push-values.patch} (100%)
rename patches/server/{0858-Fix-cancelling-ProjectileHitEvent-for-piercing-arrow.patch => 0828-Fix-cancelling-ProjectileHitEvent-for-piercing-arrow.patch} (100%)
rename patches/server/{0860-More-Projectile-API.patch => 0829-More-Projectile-API.patch} (99%)
rename patches/server/{0861-Fix-swamp-hut-cat-generation-deadlock.patch => 0830-Fix-swamp-hut-cat-generation-deadlock.patch} (100%)
rename patches/server/{0862-Don-t-allow-vehicle-movement-from-players-while-tele.patch => 0831-Don-t-allow-vehicle-movement-from-players-while-tele.patch} (100%)
rename patches/server/{0863-Implement-getComputedBiome-API.patch => 0832-Implement-getComputedBiome-API.patch} (100%)
rename patches/server/{0864-Make-some-itemstacks-nonnull.patch => 0833-Make-some-itemstacks-nonnull.patch} (100%)
rename patches/server/{0865-Add-debug-for-invalid-GameProfiles-on-skull-blocks-i.patch => 0834-Add-debug-for-invalid-GameProfiles-on-skull-blocks-i.patch} (100%)
rename patches/server/{0866-Implement-enchantWithLevels-API.patch => 0835-Implement-enchantWithLevels-API.patch} (100%)
rename patches/server/{0867-Fix-saving-in-unloadWorld.patch => 0836-Fix-saving-in-unloadWorld.patch} (90%)
rename patches/server/{0868-Buffer-OOB-setBlock-calls.patch => 0837-Buffer-OOB-setBlock-calls.patch} (100%)
rename patches/server/{0869-Add-TameableDeathMessageEvent.patch => 0838-Add-TameableDeathMessageEvent.patch} (100%)
rename patches/server/{0870-Fix-new-block-data-for-EntityChangeBlockEvent-when-s.patch => 0839-Fix-new-block-data-for-EntityChangeBlockEvent-when-s.patch} (100%)
rename patches/server/{0871-fix-player-loottables-running-when-mob-loot-gamerule.patch => 0840-fix-player-loottables-running-when-mob-loot-gamerule.patch} (87%)
rename patches/server/{0872-Ensure-entity-passenger-world-matches-ridden-entity.patch => 0841-Ensure-entity-passenger-world-matches-ridden-entity.patch} (88%)
rename patches/server/{0873-Guard-against-invalid-entity-positions.patch => 0842-Guard-against-invalid-entity-positions.patch} (88%)
rename patches/server/{0874-cache-resource-keys.patch => 0843-cache-resource-keys.patch} (100%)
rename patches/server/{0875-Allow-to-change-the-podium-for-the-EnderDragon.patch => 0844-Allow-to-change-the-podium-for-the-EnderDragon.patch} (97%)
rename patches/server/{0876-Fix-NBT-pieces-overriding-a-block-entity-during-worl.patch => 0845-Fix-NBT-pieces-overriding-a-block-entity-during-worl.patch} (100%)
rename patches/server/{0877-Fix-StructureGrowEvent-species-for-RED_MUSHROOM.patch => 0846-Fix-StructureGrowEvent-species-for-RED_MUSHROOM.patch} (100%)
rename patches/server/{0878-Prevent-tile-entity-copies-loading-chunks.patch => 0847-Prevent-tile-entity-copies-loading-chunks.patch} (100%)
rename patches/server/{0879-Use-username-instead-of-display-name-in-PlayerList-g.patch => 0848-Use-username-instead-of-display-name-in-PlayerList-g.patch} (92%)
rename patches/server/{0880-Fix-slime-spawners-not-spawning-outside-slime-chunks.patch => 0849-Fix-slime-spawners-not-spawning-outside-slime-chunks.patch} (100%)
rename patches/server/{0881-Pass-ServerLevel-for-gamerule-callbacks.patch => 0850-Pass-ServerLevel-for-gamerule-callbacks.patch} (97%)
rename patches/server/{0882-Add-pre-unbreaking-amount-to-PlayerItemDamageEvent.patch => 0851-Add-pre-unbreaking-amount-to-PlayerItemDamageEvent.patch} (100%)
rename patches/server/{0883-WorldCreator-keepSpawnLoaded.patch => 0852-WorldCreator-keepSpawnLoaded.patch} (77%)
rename patches/server/{0884-Fix-NPE-for-BlockDataMeta-getBlockData.patch => 0853-Fix-NPE-for-BlockDataMeta-getBlockData.patch} (100%)
rename patches/server/{0885-Trigger-bee_nest_destroyed-trigger-in-the-correct-pl.patch => 0854-Trigger-bee_nest_destroyed-trigger-in-the-correct-pl.patch} (100%)
rename patches/server/{0886-Add-EntityDyeEvent-and-CollarColorable-interface.patch => 0855-Add-EntityDyeEvent-and-CollarColorable-interface.patch} (100%)
rename patches/server/{0887-Fire-CauldronLevelChange-on-initial-fill.patch => 0856-Fire-CauldronLevelChange-on-initial-fill.patch} (100%)
rename patches/server/{0888-fix-powder-snow-cauldrons-not-turning-to-water.patch => 0857-fix-powder-snow-cauldrons-not-turning-to-water.patch} (100%)
rename patches/server/{0889-Add-PlayerStopUsingItemEvent.patch => 0858-Add-PlayerStopUsingItemEvent.patch} (100%)
rename patches/server/{0890-FallingBlock-auto-expire-setting.patch => 0859-FallingBlock-auto-expire-setting.patch} (100%)
rename patches/server/{0891-Don-t-tick-markers.patch => 0860-Don-t-tick-markers.patch} (91%)
rename patches/server/{0892-Do-not-accept-invalid-client-settings.patch => 0861-Do-not-accept-invalid-client-settings.patch} (100%)
rename patches/server/{0893-Add-support-for-Proxy-Protocol.patch => 0862-Add-support-for-Proxy-Protocol.patch} (100%)
rename patches/server/{0894-Fix-OfflinePlayer-getBedSpawnLocation.patch => 0863-Fix-OfflinePlayer-getBedSpawnLocation.patch} (100%)
rename patches/server/{0895-Fix-FurnaceInventory-for-smokers-and-blast-furnaces.patch => 0864-Fix-FurnaceInventory-for-smokers-and-blast-furnaces.patch} (100%)
rename patches/server/{0896-Sanitize-Sent-BlockEntity-NBT.patch => 0865-Sanitize-Sent-BlockEntity-NBT.patch} (100%)
rename patches/server/{0897-Prevent-entity-loading-causing-async-lookups.patch => 0866-Prevent-entity-loading-causing-async-lookups.patch} (94%)
rename patches/server/{0898-Disable-component-selector-resolving-in-books-by-def.patch => 0867-Disable-component-selector-resolving-in-books-by-def.patch} (100%)
rename patches/server/{0899-Throw-exception-on-world-create-while-being-ticked.patch => 0868-Throw-exception-on-world-create-while-being-ticked.patch} (90%)
rename patches/server/{0900-Add-Alternate-Current-redstone-implementation.patch => 0869-Add-Alternate-Current-redstone-implementation.patch} (99%)
rename patches/server/{0901-Dont-resent-entity-on-art-update.patch => 0870-Dont-resent-entity-on-art-update.patch} (100%)
rename patches/server/{0902-Add-missing-spawn-eggs.patch => 0871-Add-missing-spawn-eggs.patch} (96%)
rename patches/server/{0903-Add-WardenAngerChangeEvent.patch => 0872-Add-WardenAngerChangeEvent.patch} (100%)
rename patches/server/{0904-Add-option-for-strict-advancement-dimension-checks.patch => 0873-Add-option-for-strict-advancement-dimension-checks.patch} (90%)
rename patches/server/{0905-Add-missing-important-BlockStateListPopulator-method.patch => 0874-Add-missing-important-BlockStateListPopulator-method.patch} (100%)
rename patches/server/{0906-Nameable-Banner-API.patch => 0875-Nameable-Banner-API.patch} (100%)
rename patches/server/{0907-Don-t-broadcast-messages-to-command-blocks.patch => 0876-Don-t-broadcast-messages-to-command-blocks.patch} (95%)
rename patches/server/{0908-Prevent-empty-items-from-being-added-to-world.patch => 0877-Prevent-empty-items-from-being-added-to-world.patch} (94%)
rename patches/server/{0909-Fix-CCE-for-SplashPotion-and-LingeringPotion-spawnin.patch => 0878-Fix-CCE-for-SplashPotion-and-LingeringPotion-spawnin.patch} (100%)
rename patches/server/{0910-Don-t-print-component-in-resource-pack-rejection-mes.patch => 0879-Don-t-print-component-in-resource-pack-rejection-mes.patch} (100%)
rename patches/server/{0911-Add-Player-getFishHook.patch => 0880-Add-Player-getFishHook.patch} (100%)
rename patches/server/{0912-Do-not-sync-load-chunk-for-dynamic-game-event-listen.patch => 0881-Do-not-sync-load-chunk-for-dynamic-game-event-listen.patch} (100%)
rename patches/server/{0913-Add-various-missing-EntityDropItemEvent-calls.patch => 0882-Add-various-missing-EntityDropItemEvent-calls.patch} (96%)
rename patches/server/{0914-Add-some-minimal-debug-information-to-chat-packet-er.patch => 0883-Add-some-minimal-debug-information-to-chat-packet-er.patch} (100%)
rename patches/server/{0915-Fix-Bee-flower-NPE.patch => 0884-Fix-Bee-flower-NPE.patch} (100%)
rename patches/server/{0916-Fix-Spigot-Config-not-using-commands.spam-exclusions.patch => 0885-Fix-Spigot-Config-not-using-commands.spam-exclusions.patch} (100%)
rename patches/server/{0917-Add-SpawnReason-to-Tadpoles-spawned-by-Frogspawn.patch => 0886-Add-SpawnReason-to-Tadpoles-spawned-by-Frogspawn.patch} (100%)
rename patches/server/{0918-More-Teleport-API.patch => 0887-More-Teleport-API.patch} (99%)
rename patches/server/{0919-Add-EntityPortalReadyEvent.patch => 0888-Add-EntityPortalReadyEvent.patch} (88%)
rename patches/server/{0920-Don-t-use-level-random-in-entity-constructors.patch => 0889-Don-t-use-level-random-in-entity-constructors.patch} (100%)
rename patches/server/{0921-Send-block-entities-after-destroy-prediction.patch => 0890-Send-block-entities-after-destroy-prediction.patch} (100%)
rename patches/server/{0922-Warn-on-plugins-accessing-faraway-chunks.patch => 0891-Warn-on-plugins-accessing-faraway-chunks.patch} (91%)
rename patches/server/{0923-Custom-Chat-Completion-Suggestions-API.patch => 0892-Custom-Chat-Completion-Suggestions-API.patch} (94%)
rename patches/server/{0924-Add-missing-BlockFadeEvents.patch => 0893-Add-missing-BlockFadeEvents.patch} (100%)
rename patches/server/{0925-Collision-API.patch => 0894-Collision-API.patch} (100%)
rename patches/server/{0926-Fix-suggest-command-message-for-brigadier-syntax-exc.patch => 0895-Fix-suggest-command-message-for-brigadier-syntax-exc.patch} (100%)
rename patches/server/{0927-Fix-command-preprocess-cancelling-and-command-changi.patch => 0896-Fix-command-preprocess-cancelling-and-command-changi.patch} (100%)
rename patches/server/{0928-Remove-invalid-signature-login-stacktrace.patch => 0897-Remove-invalid-signature-login-stacktrace.patch} (100%)
rename patches/server/{0929-Add-async-catcher-to-PlayerConnection-internalTelepo.patch => 0898-Add-async-catcher-to-PlayerConnection-internalTelepo.patch} (100%)
rename patches/server/{0930-Block-Ticking-API.patch => 0899-Block-Ticking-API.patch} (100%)
rename patches/server/{0931-Add-Velocity-IP-Forwarding-Support.patch => 0900-Add-Velocity-IP-Forwarding-Support.patch} (99%)
rename patches/server/{0932-Use-thread-safe-random-in-ServerLoginPacketListenerI.patch => 0901-Use-thread-safe-random-in-ServerLoginPacketListenerI.patch} (100%)
rename patches/server/{0933-Add-NamespacedKey-biome-methods.patch => 0902-Add-NamespacedKey-biome-methods.patch} (100%)
rename patches/server/{0934-Fix-plugin-loggers-on-server-shutdown.patch => 0903-Fix-plugin-loggers-on-server-shutdown.patch} (92%)
rename patches/server/{0935-Workaround-for-client-lag-spikes-MC-162253.patch => 0904-Workaround-for-client-lag-spikes-MC-162253.patch} (96%)
rename patches/server/{0936-Stop-large-look-changes-from-crashing-the-server.patch => 0905-Stop-large-look-changes-from-crashing-the-server.patch} (100%)
rename patches/server/{0937-Add-custom-destroyerIdentity-to-sendBlockDamage.patch => 0906-Add-custom-destroyerIdentity-to-sendBlockDamage.patch} (94%)
rename patches/server/{0938-Fix-EndDragonFight-killed-statuses-should-be-false-f.patch => 0907-Fix-EndDragonFight-killed-statuses-should-be-false-f.patch} (100%)
rename patches/server/{0939-Fire-EntityChangeBlockEvent-in-more-places.patch => 0908-Fire-EntityChangeBlockEvent-in-more-places.patch} (100%)
rename patches/server/{0940-Missing-eating-regain-reason.patch => 0909-Missing-eating-regain-reason.patch} (100%)
rename patches/server/{0941-Missing-effect-cause.patch => 0910-Missing-effect-cause.patch} (100%)
rename patches/server/{0942-Added-byte-array-serialization-deserialization-for-P.patch => 0911-Added-byte-array-serialization-deserialization-for-P.patch} (100%)
rename patches/server/{0943-Add-a-consumer-parameter-to-ProjectileSource-launchP.patch => 0912-Add-a-consumer-parameter-to-ProjectileSource-launchP.patch} (100%)
rename patches/server/{0944-Call-BlockPhysicsEvent-more-often.patch => 0913-Call-BlockPhysicsEvent-more-often.patch} (100%)
rename patches/server/{0945-Configurable-chat-thread-limit.patch => 0914-Configurable-chat-thread-limit.patch} (95%)
rename patches/server/{0946-Mitigate-effects-of-WorldCreator-keepSpawnLoaded-ret.patch => 0915-Mitigate-effects-of-WorldCreator-keepSpawnLoaded-ret.patch} (100%)
rename patches/server/{0947-Set-position-before-player-sending-on-dimension-chan.patch => 0916-Set-position-before-player-sending-on-dimension-chan.patch} (88%)
diff --git a/patches/server/0014-ChunkMapDistance-CME.patch b/patches/removed/1.19.2-legacy-chunksystem/0014-ChunkMapDistance-CME.patch
similarity index 100%
rename from patches/server/0014-ChunkMapDistance-CME.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0014-ChunkMapDistance-CME.patch
diff --git a/patches/server/0015-Do-not-copy-visible-chunks.patch b/patches/removed/1.19.2-legacy-chunksystem/0015-Do-not-copy-visible-chunks.patch
similarity index 100%
rename from patches/server/0015-Do-not-copy-visible-chunks.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0015-Do-not-copy-visible-chunks.patch
diff --git a/patches/server/0016-Chunk-debug-command.patch b/patches/removed/1.19.2-legacy-chunksystem/0016-Chunk-debug-command.patch
similarity index 100%
rename from patches/server/0016-Chunk-debug-command.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0016-Chunk-debug-command.patch
diff --git a/patches/server/0017-Make-CallbackExecutor-strict-again.patch b/patches/removed/1.19.2-legacy-chunksystem/0017-Make-CallbackExecutor-strict-again.patch
similarity index 100%
rename from patches/server/0017-Make-CallbackExecutor-strict-again.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0017-Make-CallbackExecutor-strict-again.patch
diff --git a/patches/server/0019-Asynchronous-chunk-IO-and-loading.patch b/patches/removed/1.19.2-legacy-chunksystem/0019-Asynchronous-chunk-IO-and-loading.patch
similarity index 100%
rename from patches/server/0019-Asynchronous-chunk-IO-and-loading.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0019-Asynchronous-chunk-IO-and-loading.patch
diff --git a/patches/server/0020-Implement-Chunk-Priority-Urgency-System-for-Chunks.patch b/patches/removed/1.19.2-legacy-chunksystem/0020-Implement-Chunk-Priority-Urgency-System-for-Chunks.patch
similarity index 100%
rename from patches/server/0020-Implement-Chunk-Priority-Urgency-System-for-Chunks.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0020-Implement-Chunk-Priority-Urgency-System-for-Chunks.patch
diff --git a/patches/server/0048-Per-Player-View-Distance-API-placeholders.patch b/patches/removed/1.19.2-legacy-chunksystem/0048-Per-Player-View-Distance-API-placeholders.patch
similarity index 100%
rename from patches/server/0048-Per-Player-View-Distance-API-placeholders.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0048-Per-Player-View-Distance-API-placeholders.patch
diff --git a/patches/server/0137-Make-targetSize-more-aggressive-in-the-chunk-unload-.patch b/patches/removed/1.19.2-legacy-chunksystem/0137-Make-targetSize-more-aggressive-in-the-chunk-unload-.patch
similarity index 100%
rename from patches/server/0137-Make-targetSize-more-aggressive-in-the-chunk-unload-.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0137-Make-targetSize-more-aggressive-in-the-chunk-unload-.patch
diff --git a/patches/server/0324-Fix-CraftServer-isPrimaryThread-and-MinecraftServer-.patch b/patches/removed/1.19.2-legacy-chunksystem/0324-Fix-CraftServer-isPrimaryThread-and-MinecraftServer-.patch
similarity index 100%
rename from patches/server/0324-Fix-CraftServer-isPrimaryThread-and-MinecraftServer-.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0324-Fix-CraftServer-isPrimaryThread-and-MinecraftServer-.patch
diff --git a/patches/server/0355-Fix-Light-Command.patch b/patches/removed/1.19.2-legacy-chunksystem/0355-Fix-Light-Command.patch
similarity index 100%
rename from patches/server/0355-Fix-Light-Command.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0355-Fix-Light-Command.patch
diff --git a/patches/server/0393-Optimise-ArraySetSorted-removeIf.patch b/patches/removed/1.19.2-legacy-chunksystem/0393-Optimise-ArraySetSorted-removeIf.patch
similarity index 100%
rename from patches/server/0393-Optimise-ArraySetSorted-removeIf.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0393-Optimise-ArraySetSorted-removeIf.patch
diff --git a/patches/server/0397-Fix-Chunk-Post-Processing-deadlock-risk.patch b/patches/removed/1.19.2-legacy-chunksystem/0397-Fix-Chunk-Post-Processing-deadlock-risk.patch
similarity index 100%
rename from patches/server/0397-Fix-Chunk-Post-Processing-deadlock-risk.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0397-Fix-Chunk-Post-Processing-deadlock-risk.patch
diff --git a/patches/server/0429-Optimize-ServerLevels-chunk-level-checking-methods.patch b/patches/removed/1.19.2-legacy-chunksystem/0429-Optimize-ServerLevels-chunk-level-checking-methods.patch
similarity index 100%
rename from patches/server/0429-Optimize-ServerLevels-chunk-level-checking-methods.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0429-Optimize-ServerLevels-chunk-level-checking-methods.patch
diff --git a/patches/server/0482-Improve-Chunk-Status-Transition-Speed.patch b/patches/removed/1.19.2-legacy-chunksystem/0482-Improve-Chunk-Status-Transition-Speed.patch
similarity index 100%
rename from patches/server/0482-Improve-Chunk-Status-Transition-Speed.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0482-Improve-Chunk-Status-Transition-Speed.patch
diff --git a/patches/server/0720-Do-not-allow-the-server-to-unload-chunks-at-request-.patch b/patches/removed/1.19.2-legacy-chunksystem/0720-Do-not-allow-the-server-to-unload-chunks-at-request-.patch
similarity index 100%
rename from patches/server/0720-Do-not-allow-the-server-to-unload-chunks-at-request-.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0720-Do-not-allow-the-server-to-unload-chunks-at-request-.patch
diff --git a/patches/server/0722-Correctly-handle-recursion-for-chunkholder-updates.patch b/patches/removed/1.19.2-legacy-chunksystem/0722-Correctly-handle-recursion-for-chunkholder-updates.patch
similarity index 100%
rename from patches/server/0722-Correctly-handle-recursion-for-chunkholder-updates.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0722-Correctly-handle-recursion-for-chunkholder-updates.patch
diff --git a/patches/server/0724-Fix-chunks-refusing-to-unload-at-low-TPS.patch b/patches/removed/1.19.2-legacy-chunksystem/0724-Fix-chunks-refusing-to-unload-at-low-TPS.patch
similarity index 100%
rename from patches/server/0724-Fix-chunks-refusing-to-unload-at-low-TPS.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0724-Fix-chunks-refusing-to-unload-at-low-TPS.patch
diff --git a/patches/server/0725-Do-not-allow-ticket-level-changes-while-unloading-pl.patch b/patches/removed/1.19.2-legacy-chunksystem/0725-Do-not-allow-ticket-level-changes-while-unloading-pl.patch
similarity index 100%
rename from patches/server/0725-Do-not-allow-ticket-level-changes-while-unloading-pl.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0725-Do-not-allow-ticket-level-changes-while-unloading-pl.patch
diff --git a/patches/server/0726-Do-not-allow-ticket-level-changes-when-updating-chun.patch b/patches/removed/1.19.2-legacy-chunksystem/0726-Do-not-allow-ticket-level-changes-when-updating-chun.patch
similarity index 100%
rename from patches/server/0726-Do-not-allow-ticket-level-changes-when-updating-chun.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0726-Do-not-allow-ticket-level-changes-when-updating-chun.patch
diff --git a/patches/server/0729-Prevent-unload-calls-removing-tickets-for-sync-loads.patch b/patches/removed/1.19.2-legacy-chunksystem/0729-Prevent-unload-calls-removing-tickets-for-sync-loads.patch
similarity index 100%
rename from patches/server/0729-Prevent-unload-calls-removing-tickets-for-sync-loads.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0729-Prevent-unload-calls-removing-tickets-for-sync-loads.patch
diff --git a/patches/server/0734-Rewrite-entity-bounding-box-lookup-calls.patch b/patches/removed/1.19.2-legacy-chunksystem/0734-Rewrite-entity-bounding-box-lookup-calls.patch
similarity index 100%
rename from patches/server/0734-Rewrite-entity-bounding-box-lookup-calls.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0734-Rewrite-entity-bounding-box-lookup-calls.patch
diff --git a/patches/server/0754-Allow-removal-addition-of-entities-to-entity-ticklis.patch b/patches/removed/1.19.2-legacy-chunksystem/0754-Allow-removal-addition-of-entities-to-entity-ticklis.patch
similarity index 100%
rename from patches/server/0754-Allow-removal-addition-of-entities-to-entity-ticklis.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0754-Allow-removal-addition-of-entities-to-entity-ticklis.patch
diff --git a/patches/server/0763-Do-not-process-entity-loads-in-CraftChunk-getEntitie.patch b/patches/removed/1.19.2-legacy-chunksystem/0763-Do-not-process-entity-loads-in-CraftChunk-getEntitie.patch
similarity index 100%
rename from patches/server/0763-Do-not-process-entity-loads-in-CraftChunk-getEntitie.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0763-Do-not-process-entity-loads-in-CraftChunk-getEntitie.patch
diff --git a/patches/server/0801-Actually-unload-POI-data.patch b/patches/removed/1.19.2-legacy-chunksystem/0801-Actually-unload-POI-data.patch
similarity index 100%
rename from patches/server/0801-Actually-unload-POI-data.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0801-Actually-unload-POI-data.patch
diff --git a/patches/server/0845-Replace-ticket-level-propagator.patch b/patches/removed/1.19.2-legacy-chunksystem/0845-Replace-ticket-level-propagator.patch
similarity index 100%
rename from patches/server/0845-Replace-ticket-level-propagator.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0845-Replace-ticket-level-propagator.patch
diff --git a/patches/server/0853-Replace-player-chunk-loader-system.patch b/patches/removed/1.19.2-legacy-chunksystem/0853-Replace-player-chunk-loader-system.patch
similarity index 100%
rename from patches/server/0853-Replace-player-chunk-loader-system.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0853-Replace-player-chunk-loader-system.patch
diff --git a/patches/server/0859-Fix-save-problems-on-shutdown.patch b/patches/removed/1.19.2-legacy-chunksystem/0859-Fix-save-problems-on-shutdown.patch
similarity index 100%
rename from patches/server/0859-Fix-save-problems-on-shutdown.patch
rename to patches/removed/1.19.2-legacy-chunksystem/0859-Fix-save-problems-on-shutdown.patch
diff --git a/patches/server/0004-Paper-config-files.patch b/patches/server/0004-Paper-config-files.patch
index 40e5fd63d4..7034b9995c 100644
--- a/patches/server/0004-Paper-config-files.patch
+++ b/patches/server/0004-Paper-config-files.patch
@@ -431,14 +431,13 @@ index 0000000000000000000000000000000000000000..c2dca89291361d60cbf160cab77749cb
+}
diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
new file mode 100644
-index 0000000000000000000000000000000000000000..aaecd691922a28e971b859175574c80a330edb8e
+index 0000000000000000000000000000000000000000..84785fed0d85d78c4caf8fabe35c0e89a59240d5
--- /dev/null
+++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
-@@ -0,0 +1,274 @@
+@@ -0,0 +1,275 @@
+package io.papermc.paper.configuration;
+
+import co.aikar.timings.MinecraftTimings;
-+import com.destroystokyo.paper.io.chunk.ChunkTaskManager;
+import io.papermc.paper.configuration.constraint.Constraint;
+import io.papermc.paper.configuration.constraint.Constraints;
+import net.kyori.adventure.text.Component;
@@ -604,15 +603,17 @@ index 0000000000000000000000000000000000000000..aaecd691922a28e971b859175574c80a
+ public boolean saveEmptyScoreboardTeams = false;
+ }
+
-+ public AsyncChunks asyncChunks;
++ public ChunkSystem chunkSystem;
+
-+ public class AsyncChunks extends ConfigurationPart.Post {
-+ public int threads = -1;
-+ public transient boolean asyncChunks = false;
++ public class ChunkSystem extends ConfigurationPart.Post {
++
++ public int ioThreads = -1;
++ public int workerThreads = -1;
++ public String genParallelism = "default";
+
+ @Override
+ public void postProcess() {
-+ ChunkTaskManager.processConfiguration(this);
++ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.init(this);
+ }
+ }
+
diff --git a/patches/server/0006-ConcurrentUtil.patch b/patches/server/0006-ConcurrentUtil.patch
index 065818eebe..c2bb4af335 100644
--- a/patches/server/0006-ConcurrentUtil.patch
+++ b/patches/server/0006-ConcurrentUtil.patch
@@ -1412,6 +1412,160 @@ index 0000000000000000000000000000000000000000..f4415f782b32fed25da98e44b172f717
+ }
+ }
+}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..597659f38aa816646dcda4ca39c002b6d9f9a792
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/collection/SRSWLinkedQueue.java
+@@ -0,0 +1,148 @@
++package ca.spottedleaf.concurrentutil.collection;
++
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.Validate;
++import java.lang.invoke.VarHandle;
++import java.util.ConcurrentModificationException;
++
++/**
++ * Single reader thread single writer thread queue. The reader side of the queue is ordered by acquire semantics,
++ * and the writer side of the queue is ordered by release semantics.
++ */
++// TODO test
++public class SRSWLinkedQueue<E> {
++
++ // always non-null
++ protected LinkedNode<E> head;
++
++ // always non-null
++ protected LinkedNode<E> tail;
++
++ /* IMPL NOTE: Leave hashCode and equals to their defaults */
++
++ public SRSWLinkedQueue() {
++ final LinkedNode<E> dummy = new LinkedNode<>(null, null);
++ this.head = this.tail = dummy;
++ }
++
++ /**
++ * Must be the reader thread.
++ *
++ *
++ * Returns, without removing, the first element of this queue.
++ *
++ * @return Returns, without removing, the first element of this queue.
++ */
++ public E peekFirst() {
++ LinkedNode<E> head = this.head;
++ E ret = head.getElementPlain();
++ if (ret == null) {
++ head = head.getNextAcquire();
++ if (head == null) {
++ // empty
++ return null;
++ }
++ // update head reference for next poll() call
++ this.head = head;
++ // guaranteed to be non-null
++ ret = head.getElementPlain();
++ if (ret == null) {
++ throw new ConcurrentModificationException("Multiple reader threads");
++ }
++ }
++
++ return ret;
++ }
++
++ /**
++ * Must be the reader thread.
++ *
++ *
++ * Returns and removes the first element of this queue.
++ *
++ * @return Returns and removes the first element of this queue.
++ */
++ public E poll() {
++ LinkedNode<E> head = this.head;
++ E ret = head.getElementPlain();
++ if (ret == null) {
++ head = head.getNextAcquire();
++ if (head == null) {
++ // empty
++ return null;
++ }
++ // guaranteed to be non-null
++ ret = head.getElementPlain();
++ if (ret == null) {
++ throw new ConcurrentModificationException("Multiple reader threads");
++ }
++ }
++
++ head.setElementPlain(null);
++ LinkedNode<E> next = head.getNextAcquire();
++ this.head = next == null ? head : next;
++
++ return ret;
++ }
++
++ /**
++ * Must be the writer thread.
++ *
++ *
++ * Adds the element to the end of the queue.
++ *
++ *
++ * @throws NullPointerException If the provided element is null
++ */
++ public void addLast(final E element) {
++ Validate.notNull(element, "Provided element cannot be null");
++ final LinkedNode<E> append = new LinkedNode<>(element, null);
++
++ this.tail.setNextRelease(append);
++ this.tail = append;
++ }
++
++ protected static final class LinkedNode<E> {
++
++ protected volatile Object element;
++ protected volatile LinkedNode<E> next;
++
++ protected static final VarHandle ELEMENT_HANDLE = ConcurrentUtil.getVarHandle(LinkedNode.class, "element", Object.class);
++ protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(LinkedNode.class, "next", LinkedNode.class);
++
++ protected LinkedNode(final Object element, final LinkedNode<E> next) {
++ ELEMENT_HANDLE.set(this, element);
++ NEXT_HANDLE.set(this, next);
++ }
++
++ /* element */
++
++ @SuppressWarnings("unchecked")
++ protected final E getElementPlain() {
++ return (E)ELEMENT_HANDLE.get(this);
++ }
++
++ protected final void setElementPlain(final E update) {
++ ELEMENT_HANDLE.set(this, (Object)update);
++ }
++ /* next */
++
++ @SuppressWarnings("unchecked")
++ protected final LinkedNode<E> getNextPlain() {
++ return (LinkedNode<E>)NEXT_HANDLE.get(this);
++ }
++
++ @SuppressWarnings("unchecked")
++ protected final LinkedNode<E> getNextAcquire() {
++ return (LinkedNode<E>)NEXT_HANDLE.getAcquire(this);
++ }
++
++ protected final void setNextPlain(final LinkedNode<E> next) {
++ NEXT_HANDLE.set(this, next);
++ }
++
++ protected final void setNextRelease(final LinkedNode<E> next) {
++ NEXT_HANDLE.setRelease(this, next);
++ }
++ }
++}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java b/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java
new file mode 100644
index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1cd3382b2a
@@ -1518,7 +1672,7 @@ index 0000000000000000000000000000000000000000..a1ad3308f9c3545a604b635896259a1c
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java
new file mode 100644
-index 0000000000000000000000000000000000000000..716a0fd3f558df748e355069746272facb91de22
+index 0000000000000000000000000000000000000000..8c452b0988da4725762d543f6bee09915c328ae6
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/BaseExecutor.java
@@ -0,0 +1,198 @@
@@ -1575,11 +1729,11 @@ index 0000000000000000000000000000000000000000..716a0fd3f558df748e355069746272fa
+ * @throws IllegalStateException If the current thread is not allowed to wait
+ */
+ public default void waitUntilAllExecuted() throws IllegalStateException {
-+ long failures = 9L; // start out at 1ms
++ long failures = 1L; // start at 0.25ms
+
+ while (!this.haveAllTasksExecuted()) {
+ Thread.yield();
-+ failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 5_000_000L); // 500us, 5ms
++ failures = ConcurrentUtil.linearLongBackoff(failures, 250_000L, 5_000_000L); // 250us, 5ms
+ }
+ }
+
diff --git a/patches/server/0008-MC-Utils.patch b/patches/server/0008-MC-Utils.patch
index cc478ca443..41a5d247a4 100644
--- a/patches/server/0008-MC-Utils.patch
+++ b/patches/server/0008-MC-Utils.patch
@@ -856,6 +856,137 @@ index 0000000000000000000000000000000000000000..277cfd9d1e8fff5d9b5e534b75c3c516
+ return this.map.values().iterator();
+ }
+}
+diff --git a/src/main/java/com/destroystokyo/paper/util/maplist/ReferenceList.java b/src/main/java/com/destroystokyo/paper/util/maplist/ReferenceList.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..190c5f0b02a3d99054704ae1afbffb3498ddffe1
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/util/maplist/ReferenceList.java
+@@ -0,0 +1,125 @@
++package com.destroystokyo.paper.util.maplist;
++
++import it.unimi.dsi.fastutil.objects.Reference2IntOpenHashMap;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.NoSuchElementException;
++
++/**
++ * @author Spottedleaf
++ */
++public final class ReferenceList<E> implements Iterable<E> {
++
++ protected final Reference2IntOpenHashMap<E> referenceToIndex = new Reference2IntOpenHashMap<>(2, 0.8f);
++ {
++ this.referenceToIndex.defaultReturnValue(Integer.MIN_VALUE);
++ }
++
++ protected static final Object[] EMPTY_LIST = new Object[0];
++
++ protected Object[] references = EMPTY_LIST;
++ protected int count;
++
++ public int size() {
++ return this.count;
++ }
++
++ public boolean contains(final E obj) {
++ return this.referenceToIndex.containsKey(obj);
++ }
++
++ public boolean remove(final E obj) {
++ final int index = this.referenceToIndex.removeInt(obj);
++ if (index == Integer.MIN_VALUE) {
++ return false;
++ }
++
++ // move the object at the end to this index
++ final int endIndex = --this.count;
++ final E end = (E)this.references[endIndex];
++ if (index != endIndex) {
++ // not empty after this call
++ this.referenceToIndex.put(end, index); // update index
++ }
++ this.references[index] = end;
++ this.references[endIndex] = null;
++
++ return true;
++ }
++
++ public boolean add(final E obj) {
++ final int count = this.count;
++ final int currIndex = this.referenceToIndex.putIfAbsent(obj, count);
++
++ if (currIndex != Integer.MIN_VALUE) {
++ return false; // already in this list
++ }
++
++ Object[] list = this.references;
++
++ if (list.length == count) {
++ // resize required
++ list = this.references = Arrays.copyOf(list, (int)Math.max(4L, count * 2L)); // overflow results in negative
++ }
++
++ list[count] = obj;
++ this.count = count + 1;
++
++ return true;
++ }
++
++ public E getChecked(final int index) {
++ if (index < 0 || index >= this.count) {
++ throw new IndexOutOfBoundsException("Index: " + index + " is out of bounds, size: " + this.count);
++ }
++ return (E)this.references[index];
++ }
++
++ public E getUnchecked(final int index) {
++ return (E)this.references[index];
++ }
++
++ public Object[] getRawData() {
++ return this.references;
++ }
++
++ public void clear() {
++ this.referenceToIndex.clear();
++ Arrays.fill(this.references, 0, this.count, null);
++ this.count = 0;
++ }
++
++ @Override
++ public Iterator<E> iterator() {
++ return new Iterator<>() {
++ private E lastRet;
++ private int current;
++
++ @Override
++ public boolean hasNext() {
++ return this.current < ReferenceList.this.count;
++ }
++
++ @Override
++ public E next() {
++ if (this.current >= ReferenceList.this.count) {
++ throw new NoSuchElementException();
++ }
++ return this.lastRet = (E)ReferenceList.this.references[this.current++];
++ }
++
++ @Override
++ public void remove() {
++ final E lastRet = this.lastRet;
++
++ if (lastRet == null) {
++ throw new IllegalStateException();
++ }
++ this.lastRet = null;
++
++ ReferenceList.this.remove(lastRet);
++ --this.current;
++ }
++ };
++ }
++}
diff --git a/src/main/java/com/destroystokyo/paper/util/misc/AreaMap.java b/src/main/java/com/destroystokyo/paper/util/misc/AreaMap.java
new file mode 100644
index 0000000000000000000000000000000000000000..c89f6986eda5a132a948732ea1b6923370685317
@@ -4530,10 +4661,10 @@ index 207f1c1fc9d4451d27047bb8362bded8cd53e32f..021a26a6b1c258deffc26c035ab52a4e
} else {
diff --git a/src/main/java/net/minecraft/server/ChunkSystem.java b/src/main/java/net/minecraft/server/ChunkSystem.java
new file mode 100644
-index 0000000000000000000000000000000000000000..83dc09f6526206690c474b50a7a6e71cefc93ab4
+index 0000000000000000000000000000000000000000..c59fca05484c30b28e883f5b5dde0362f294b517
--- /dev/null
+++ b/src/main/java/net/minecraft/server/ChunkSystem.java
-@@ -0,0 +1,269 @@
+@@ -0,0 +1,294 @@
+package net.minecraft.server;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
@@ -4544,6 +4675,7 @@ index 0000000000000000000000000000000000000000..83dc09f6526206690c474b50a7a6e71c
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.ServerPlayer;
+import net.minecraft.server.level.TicketType;
+import net.minecraft.world.entity.Entity;
+import net.minecraft.world.level.ChunkPos;
@@ -4775,30 +4907,54 @@ index 0000000000000000000000000000000000000000..83dc09f6526206690c474b50a7a6e71c
+ }
+ }
+
-+ public static void onChunkBorder(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkBorder(final LevelChunk chunk, final ChunkHolder holder) {
+ chunk.playerChunk = holder;
+ }
+
-+ public static void onChunkNotBorder(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkNotBorder(final LevelChunk chunk, final ChunkHolder holder) {
+
+ }
+
-+ public static void onChunkTicking(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkTicking(final LevelChunk chunk, final ChunkHolder holder) {
+ chunk.level.getChunkSource().tickingChunks.add(chunk);
+ }
+
-+ public static void onChunkNotTicking(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkNotTicking(final LevelChunk chunk, final ChunkHolder holder) {
+ chunk.level.getChunkSource().tickingChunks.remove(chunk);
+ }
+
-+ public static void onChunkEntityTicking(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkEntityTicking(final LevelChunk chunk, final ChunkHolder holder) {
+ chunk.level.getChunkSource().entityTickingChunks.add(chunk);
+ }
+
-+ public static void onChunkNotEntityTicking(LevelChunk chunk, ChunkHolder holder) {
++ public static void onChunkNotEntityTicking(final LevelChunk chunk, final ChunkHolder holder) {
+ chunk.level.getChunkSource().entityTickingChunks.remove(chunk);
+ }
+
++ public static ChunkHolder getUnloadingChunkHolder(final ServerLevel level, final int chunkX, final int chunkZ) {
++ return level.chunkSource.chunkMap.getUnloadingChunkHolder(chunkX, chunkZ);
++ }
++
++ public static int getSendViewDistance(final ServerPlayer player) {
++ return getLoadViewDistance(player);
++ }
++
++ public static int getLoadViewDistance(final ServerPlayer player) {
++ final ServerLevel level = player.getLevel();
++ if (level == null) {
++ return Bukkit.getViewDistance() + 1;
++ }
++ return level.chunkSource.chunkMap.getEffectiveViewDistance() + 1;
++ }
++
++ public static int getTickViewDistance(final ServerPlayer player) {
++ final ServerLevel level = player.getLevel();
++ if (level == null) {
++ return Bukkit.getSimulationDistance();
++ }
++ return level.chunkSource.chunkMap.distanceManager.getSimulationDistance();
++ }
++
+ private ChunkSystem() {
+ throw new RuntimeException();
+ }
@@ -5882,7 +6038,7 @@ index 91a9b9ff0d7821a2261e7137fb1b3989ba096b88..1fbe1b6de925f71763f79fe3d2371b70
@Override
diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java
-index 6c98676827ceb6999f340fa2b06a0b3e1cb4cae2..f08089b8672454acf8c2309e850466b335248692 100644
+index 6c98676827ceb6999f340fa2b06a0b3e1cb4cae2..fbe62a31ab199d83a1db0a4e0b1a813824e6f2c2 100644
--- a/src/main/java/net/minecraft/server/level/DistanceManager.java
+++ b/src/main/java/net/minecraft/server/level/DistanceManager.java
@@ -60,8 +60,9 @@ public abstract class DistanceManager {
@@ -5904,7 +6060,20 @@ index 6c98676827ceb6999f340fa2b06a0b3e1cb4cae2..f08089b8672454acf8c2309e850466b3
}
protected void purgeStaleTickets() {
-@@ -382,7 +384,7 @@ public abstract class DistanceManager {
+@@ -319,6 +321,12 @@ public abstract class DistanceManager {
+ this.playerTicketManager.updateViewDistance(viewDistance);
+ }
+
++ // Paper start
++ public int getSimulationDistance() {
++ return this.simulationDistance;
++ }
++ // Paper end
++
+ public void updateSimulationDistance(int simulationDistance) {
+ if (simulationDistance != this.simulationDistance) {
+ this.simulationDistance = simulationDistance;
+@@ -382,7 +390,7 @@ public abstract class DistanceManager {
}
public void removeTicketsOnClosing() {
@@ -6332,19 +6501,20 @@ index aa396df025115c7fd866cbc63a44c2c17abfde84..b2f79a0c9caa6783816afc36531c9437
public ServerLevel(MinecraftServer minecraftserver, Executor executor, LevelStorageSource.LevelStorageAccess convertable_conversionsession, PrimaryLevelData iworlddataserver, ResourceKey<Level> resourcekey, LevelStem worlddimension, ChunkProgressListener worldloadlistener, boolean flag, long i, List<CustomSpawner> list, boolean flag1, org.bukkit.World.Environment env, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider) {
// Holder holder = worlddimension.typeHolder(); // CraftBukkit - decompile error
diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java
-index e2ed77972fcec43fef5f3af044479849f78901a9..84564ca128d2dfc79c0b5a13b699cf6fc80bdea7 100644
+index e2ed77972fcec43fef5f3af044479849f78901a9..bdad7b404067ab65d85d1628db9009896a43a052 100644
--- a/src/main/java/net/minecraft/server/level/ServerPlayer.java
+++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java
-@@ -243,6 +243,8 @@ public class ServerPlayer extends Player {
+@@ -243,6 +243,9 @@ public class ServerPlayer extends Player {
public String kickLeaveMessage = null; // SPIGOT-3034: Forward leave message to PlayerQuitEvent
// CraftBukkit end
++ public boolean isRealPlayer; // Paper
+ public final com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> cachedSingleHashSet; // Paper
+
public ServerPlayer(MinecraftServer server, ServerLevel world, GameProfile profile, @Nullable ProfilePublicKey publicKey) {
super(world, world.getSharedSpawnPos(), world.getSharedSpawnAngle(), profile, publicKey);
this.chatVisibility = ChatVisiblity.FULL;
-@@ -306,6 +308,8 @@ public class ServerPlayer extends Player {
+@@ -306,6 +309,8 @@ public class ServerPlayer extends Player {
this.maxUpStep = 1.0F;
this.fudgeSpawnLocation(world);
@@ -6396,6 +6566,18 @@ index 96ab71f72b43758b86f8990a74a238ad68e10890..32d6e4b194c3c4eca7009059f8d18589
@Override
public BlockState getBlockState(BlockPos pos) {
return this.getChunk(SectionPos.blockToSectionCoord(pos.getX()), SectionPos.blockToSectionCoord(pos.getZ())).getBlockState(pos);
+diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java
+index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..474843e57028ade5ef36ac5cda4924dbd95f6fe4 100644
+--- a/src/main/java/net/minecraft/server/players/PlayerList.java
++++ b/src/main/java/net/minecraft/server/players/PlayerList.java
+@@ -175,6 +175,7 @@ public abstract class PlayerList {
+ }
+
+ public void placeNewPlayer(Connection connection, ServerPlayer player) {
++ player.isRealPlayer = true; // Paper
+ GameProfile gameprofile = player.getGameProfile();
+ GameProfileCache usercache = this.server.getProfileCache();
+ Optional<GameProfile> optional = usercache.get(gameprofile.getId());
diff --git a/src/main/java/net/minecraft/util/thread/BlockableEventLoop.java b/src/main/java/net/minecraft/util/thread/BlockableEventLoop.java
index 288fdbef407d11ab430d5d7026dfad148c3c1065..6fefa619299d3202158490630d62c16aef71e831 100644
--- a/src/main/java/net/minecraft/util/thread/BlockableEventLoop.java
@@ -6848,7 +7030,7 @@ index d484aaae8614e78fdb984b26304b1de8b649e4bd..fabc7df600c89b01d97a76eb0b1206a3
this.levelHeightAccessor = heightLimitView;
this.sections = new LevelChunkSection[heightLimitView.getSectionsCount()];
diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
-index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd97007e3a2c22 100644
+index e518e8e417f2eee43ff0847c24b6858054e7c9a9..bcd0287d99eeba2b3534b4a298dc4b79b293ec58 100644
--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
@@ -25,6 +25,7 @@ import net.minecraft.nbt.CompoundTag;
@@ -6859,12 +7041,11 @@ index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd9700
import net.minecraft.server.level.ServerLevel;
import net.minecraft.util.profiling.ProfilerFiller;
import net.minecraft.world.entity.Entity;
-@@ -124,6 +125,110 @@ public class LevelChunk extends ChunkAccess {
+@@ -124,6 +125,109 @@ public class LevelChunk extends ChunkAccess {
// CraftBukkit end
+ // Paper start
-+ public final com.destroystokyo.paper.util.maplist.EntityList entities = new com.destroystokyo.paper.util.maplist.EntityList();
+ public @Nullable ChunkHolder playerChunk;
+
+ static final int NEIGHBOUR_CACHE_RADIUS = 3;
@@ -6970,7 +7151,7 @@ index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd9700
public LevelChunk(ServerLevel world, ProtoChunk protoChunk, @Nullable LevelChunk.PostLoadProcessor entityLoader) {
this(world, protoChunk.getPos(), protoChunk.getUpgradeData(), protoChunk.unpackBlockTicks(), protoChunk.unpackFluidTicks(), protoChunk.getInhabitedTime(), protoChunk.getSections(), entityLoader, protoChunk.getBlendingData());
Iterator iterator = protoChunk.getBlockEntities().values().iterator();
-@@ -233,6 +338,18 @@ public class LevelChunk extends ChunkAccess {
+@@ -233,6 +337,18 @@ public class LevelChunk extends ChunkAccess {
}
}
@@ -6989,7 +7170,7 @@ index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd9700
@Override
public FluidState getFluidState(BlockPos pos) {
return this.getFluidState(pos.getX(), pos.getY(), pos.getZ());
-@@ -354,6 +471,7 @@ public class LevelChunk extends ChunkAccess {
+@@ -354,6 +470,7 @@ public class LevelChunk extends ChunkAccess {
return this.getBlockEntity(pos, LevelChunk.EntityCreationType.CHECK);
}
@@ -6997,7 +7178,7 @@ index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd9700
@Nullable
public BlockEntity getBlockEntity(BlockPos pos, LevelChunk.EntityCreationType creationType) {
// CraftBukkit start
-@@ -535,7 +653,25 @@ public class LevelChunk extends ChunkAccess {
+@@ -535,7 +652,25 @@ public class LevelChunk extends ChunkAccess {
// CraftBukkit start
public void loadCallback() {
@@ -7023,7 +7204,7 @@ index e518e8e417f2eee43ff0847c24b6858054e7c9a9..ab986a3d1dc2f605b5b84d2b62cd9700
if (server != null) {
/*
* If it's a new world, the first few chunks are generated inside
-@@ -574,6 +710,22 @@ public class LevelChunk extends ChunkAccess {
+@@ -574,6 +709,22 @@ public class LevelChunk extends ChunkAccess {
server.getPluginManager().callEvent(unloadEvent);
// note: saving can be prevented, but not forced if no saving is actually required
this.mustNotSave = !unloadEvent.isSaveChunk();
diff --git a/patches/server/0009-Adventure.patch b/patches/server/0009-Adventure.patch
index 5e5641e209..35a7393050 100644
--- a/patches/server/0009-Adventure.patch
+++ b/patches/server/0009-Adventure.patch
@@ -1959,7 +1959,7 @@ index 805a1773d55e2551911e5b8e69052e23f630359b..e4220f14a5ebf43dd3491fc8649c2be5
}
diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java
-index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400ff6ea97bd 100644
+index bdad7b404067ab65d85d1628db9009896a43a052..5aad3da061d391d1003bdcca95dd4f7e5c0e5ea8 100644
--- a/src/main/java/net/minecraft/server/level/ServerPlayer.java
+++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java
@@ -154,6 +154,7 @@ import net.minecraft.world.scores.Score;
@@ -1978,7 +1978,7 @@ index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400f
public Component listName;
public org.bukkit.Location compassTarget;
public int newExp = 0;
-@@ -312,6 +314,7 @@ public class ServerPlayer extends Player {
+@@ -313,6 +315,7 @@ public class ServerPlayer extends Player {
// CraftBukkit start
this.displayName = this.getScoreboardName();
@@ -1986,7 +1986,7 @@ index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400f
this.bukkitPickUpLoot = true;
this.maxHealthCache = this.getMaxHealth();
}
-@@ -788,22 +791,17 @@ public class ServerPlayer extends Player {
+@@ -789,22 +792,17 @@ public class ServerPlayer extends Player {
String deathmessage = defaultMessage.getString();
this.keepLevel = keepInventory; // SPIGOT-2222: pre-set keepLevel
@@ -2013,7 +2013,7 @@ index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400f
this.connection.send(new ClientboundPlayerCombatKillPacket(this.getCombatTracker(), ichatbasecomponent), PacketSendListener.exceptionallySend(() -> {
boolean flag1 = true;
-@@ -1729,8 +1727,13 @@ public class ServerPlayer extends Player {
+@@ -1730,8 +1728,13 @@ public class ServerPlayer extends Player {
}
public void sendChatMessage(OutgoingPlayerChatMessage message, boolean filterMaskEnabled, ChatType.Bound params) {
@@ -2028,7 +2028,7 @@ index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400f
}
}
-@@ -1751,6 +1754,7 @@ public class ServerPlayer extends Player {
+@@ -1752,6 +1755,7 @@ public class ServerPlayer extends Player {
}
public String locale = "en_us"; // CraftBukkit - add, lowercase
@@ -2036,7 +2036,7 @@ index 84564ca128d2dfc79c0b5a13b699cf6fc80bdea7..9ab4588e4e512176b881ad4c252e400f
public void updateOptions(ServerboundClientInformationPacket packet) {
// CraftBukkit start
if (getMainArm() != packet.mainHand()) {
-@@ -1762,6 +1766,10 @@ public class ServerPlayer extends Player {
+@@ -1763,6 +1767,10 @@ public class ServerPlayer extends Player {
this.server.server.getPluginManager().callEvent(event);
}
this.locale = packet.language;
@@ -2278,7 +2278,7 @@ index 3a587073dbe5e8a599d342c5f758d842b7b6cddb..a426adfba3fccf1815177e0b8065684c
@Override
diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java
-index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e311706a10 100644
+index 474843e57028ade5ef36ac5cda4924dbd95f6fe4..3710f544a491a837b973daedc2dfa51357b70b56 100644
--- a/src/main/java/net/minecraft/server/players/PlayerList.java
+++ b/src/main/java/net/minecraft/server/players/PlayerList.java
@@ -8,6 +8,7 @@ import com.mojang.logging.LogUtils;
@@ -2289,7 +2289,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
import java.io.File;
import java.net.SocketAddress;
import java.nio.file.Path;
-@@ -264,7 +265,7 @@ public abstract class PlayerList {
+@@ -265,7 +266,7 @@ public abstract class PlayerList {
}
// CraftBukkit start
ichatmutablecomponent.withStyle(ChatFormatting.YELLOW);
@@ -2298,7 +2298,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
playerconnection.teleport(player.getX(), player.getY(), player.getZ(), player.getYRot(), player.getXRot());
this.players.add(player);
-@@ -278,19 +279,18 @@ public abstract class PlayerList {
+@@ -279,19 +280,18 @@ public abstract class PlayerList {
// Ensure that player inventory is populated with its viewer
player.containerMenu.transferTo(player.containerMenu, bukkitPlayer);
@@ -2323,7 +2323,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
}
// CraftBukkit end
-@@ -487,7 +487,7 @@ public abstract class PlayerList {
+@@ -488,7 +488,7 @@ public abstract class PlayerList {
}
@@ -2332,7 +2332,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
ServerLevel worldserver = entityplayer.getLevel();
entityplayer.awardStat(Stats.LEAVE_GAME);
-@@ -498,7 +498,7 @@ public abstract class PlayerList {
+@@ -499,7 +499,7 @@ public abstract class PlayerList {
entityplayer.closeContainer();
}
@@ -2341,7 +2341,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
this.cserver.getPluginManager().callEvent(playerQuitEvent);
entityplayer.getBukkitEntity().disconnect(playerQuitEvent.getQuitMessage());
-@@ -551,7 +551,7 @@ public abstract class PlayerList {
+@@ -552,7 +552,7 @@ public abstract class PlayerList {
this.cserver.getScoreboardManager().removePlayer(entityplayer.getBukkitEntity());
// CraftBukkit end
@@ -2350,7 +2350,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
}
// CraftBukkit start - Whole method, SocketAddress to LoginListener, added hostname to signature, return EntityPlayer
-@@ -597,10 +597,10 @@ public abstract class PlayerList {
+@@ -598,10 +598,10 @@ public abstract class PlayerList {
}
// return chatmessage;
@@ -2363,7 +2363,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
} else if (this.getIpBans().isBanned(socketaddress) && !this.getIpBans().get(socketaddress).hasExpired()) {
IpBanListEntry ipbanentry = this.ipBans.get(socketaddress);
-@@ -610,17 +610,17 @@ public abstract class PlayerList {
+@@ -611,17 +611,17 @@ public abstract class PlayerList {
}
// return chatmessage;
@@ -2384,7 +2384,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
return null;
}
return entity;
-@@ -1128,7 +1128,7 @@ public abstract class PlayerList {
+@@ -1129,7 +1129,7 @@ public abstract class PlayerList {
public void removeAll() {
// CraftBukkit start - disconnect safely
for (ServerPlayer player : this.players) {
@@ -2393,7 +2393,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
}
// CraftBukkit end
-@@ -1169,14 +1169,25 @@ public abstract class PlayerList {
+@@ -1170,14 +1170,25 @@ public abstract class PlayerList {
}
public void broadcastChatMessage(PlayerChatMessage message, ServerPlayer sender, ChatType.Bound params) {
@@ -2421,7 +2421,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
OutgoingPlayerChatMessage outgoingplayerchatmessage = OutgoingPlayerChatMessage.create(message);
boolean flag1 = message.isFullyFiltered();
boolean flag2 = false;
-@@ -1186,7 +1197,7 @@ public abstract class PlayerList {
+@@ -1187,7 +1198,7 @@ public abstract class PlayerList {
ServerPlayer entityplayer1 = (ServerPlayer) iterator.next();
boolean flag3 = shouldSendFiltered.test(entityplayer1);
@@ -2430,7 +2430,7 @@ index 6987bee4bf2c1f3d47ffdd5329f6c0c63a2962a5..521f485366c65527ac3289dd27d8f2e3
if (sender != entityplayer1) {
flag2 |= flag1 && flag3;
}
-@@ -1213,7 +1224,7 @@ public abstract class PlayerList {
+@@ -1214,7 +1225,7 @@ public abstract class PlayerList {
}
diff --git a/patches/server/0012-Timings-v2.patch b/patches/server/0012-Timings-v2.patch
index 0d50ea9704..7d8b8c763c 100644
--- a/patches/server/0012-Timings-v2.patch
+++ b/patches/server/0012-Timings-v2.patch
@@ -1309,7 +1309,7 @@ index 93d02b5de0721e3c5903e80bbf8b3b56ec3ab45d..4e7db441f68019d6e5d3359605b76bc4
}
// CraftBukkit end
diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java
-index 521f485366c65527ac3289dd27d8f2e311706a10..5833cc3d5014dad82607afc4d643b6bed885be64 100644
+index 3710f544a491a837b973daedc2dfa51357b70b56..e7fcb402e3d4e0707a28505a9fb6642764034e23 100644
--- a/src/main/java/net/minecraft/server/players/PlayerList.java
+++ b/src/main/java/net/minecraft/server/players/PlayerList.java
@@ -1,5 +1,6 @@
@@ -1319,7 +1319,7 @@ index 521f485366c65527ac3289dd27d8f2e311706a10..5833cc3d5014dad82607afc4d643b6be
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-@@ -1018,10 +1019,11 @@ public abstract class PlayerList {
+@@ -1019,10 +1020,11 @@ public abstract class PlayerList {
}
public void saveAll() {
@@ -1588,10 +1588,10 @@ index dec38e58e30c84887e9d29436c0f76c70c0a627d..be08224c8107aab3e9a3645a20977dd1
private static final CraftPersistentDataTypeRegistry DATA_TYPE_REGISTRY = new CraftPersistentDataTypeRegistry();
public CraftPersistentDataContainer persistentDataContainer;
diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
-index ab986a3d1dc2f605b5b84d2b62cd97007e3a2c22..58a245b2ca6e65d491694142ad04d38236b46434 100644
+index bcd0287d99eeba2b3534b4a298dc4b79b293ec58..b322d9b7bd9e107a9adf995b6c4db4ff0af05fc1 100644
--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
-@@ -681,6 +681,7 @@ public class LevelChunk extends ChunkAccess {
+@@ -680,6 +680,7 @@ public class LevelChunk extends ChunkAccess {
server.getPluginManager().callEvent(new org.bukkit.event.world.ChunkLoadEvent(this.bukkitChunk, this.needsDecoration));
if (this.needsDecoration) {
@@ -1599,7 +1599,7 @@ index ab986a3d1dc2f605b5b84d2b62cd97007e3a2c22..58a245b2ca6e65d491694142ad04d382
this.needsDecoration = false;
java.util.Random random = new java.util.Random();
random.setSeed(this.level.getSeed());
-@@ -700,6 +701,7 @@ public class LevelChunk extends ChunkAccess {
+@@ -699,6 +700,7 @@ public class LevelChunk extends ChunkAccess {
}
}
server.getPluginManager().callEvent(new org.bukkit.event.world.ChunkPopulateEvent(this.bukkitChunk));
diff --git a/patches/server/0760-Rewrite-dataconverter-system.patch b/patches/server/0013-Rewrite-dataconverter-system.patch
similarity index 99%
rename from patches/server/0760-Rewrite-dataconverter-system.patch
rename to patches/server/0013-Rewrite-dataconverter-system.patch
index 12653fcbc3..eb71ed8fe3 100644
--- a/patches/server/0760-Rewrite-dataconverter-system.patch
+++ b/patches/server/0013-Rewrite-dataconverter-system.patch
@@ -22656,10 +22656,10 @@ index 0000000000000000000000000000000000000000..967ad1186cbc81a76a4958ea99d4eff3
+ }
+}
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
-index fee9a8e74bfcc94942991b56799debf67b551f43..b230a3d475357d2ffd340f9a89934ea7227e69d0 100644
+index c56946f86565ad1ac41bb7b655c113f648d2f539..9730ee10042e02741383c8153eb3b7b7103f80e0 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
-@@ -87,7 +87,7 @@ public class ChunkStorage implements AutoCloseable {
+@@ -78,7 +78,7 @@ public class ChunkStorage implements AutoCloseable {
int i = ChunkStorage.getVersion(nbttagcompound);
// CraftBukkit start
@@ -22668,16 +22668,16 @@ index fee9a8e74bfcc94942991b56799debf67b551f43..b230a3d475357d2ffd340f9a89934ea7
CompoundTag level = nbttagcompound.getCompound("Level");
if (level.getBoolean("TerrainPopulated") && !level.getBoolean("LightPopulated")) {
ServerChunkCache cps = (generatoraccess == null) ? null : ((ServerLevel) generatoraccess).getChunkSource();
-@@ -99,7 +99,7 @@ public class ChunkStorage implements AutoCloseable {
+@@ -90,7 +90,7 @@ public class ChunkStorage implements AutoCloseable {
// CraftBukkit end
if (i < 1493) {
- nbttagcompound = NbtUtils.update(this.fixerUpper, DataFixTypes.CHUNK, nbttagcompound, i, 1493);
+ ca.spottedleaf.dataconverter.minecraft.MCDataConverter.convertTag(ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry.CHUNK, nbttagcompound, i, 1493); // Paper - replace chunk converter
if (nbttagcompound.getCompound("Level").getBoolean("hasLegacyStructureData")) {
- synchronized (this.persistentDataLock) { // Paper - Async chunk loading
LegacyStructureDataHandler persistentstructurelegacy = this.getLegacyStructureHandler(resourcekey, supplier);
-@@ -119,7 +119,7 @@ public class ChunkStorage implements AutoCloseable {
+
+@@ -108,7 +108,7 @@ public class ChunkStorage implements AutoCloseable {
// Spigot end
ChunkStorage.injectDatafixingContext(nbttagcompound, resourcekey, optional);
@@ -22687,10 +22687,10 @@ index fee9a8e74bfcc94942991b56799debf67b551f43..b230a3d475357d2ffd340f9a89934ea7
nbttagcompound.putInt("DataVersion", SharedConstants.getCurrentVersion().getWorldVersion());
}
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
-index de7afc737b1ab099edc29a4ef94baa76329c2947..2bc0384728f89b7c64a8beec78a1b77dc063d37b 100644
+index dae66dd5dbebc7fd8fc331b1f5f06ec461667830..0ede151943109e81f66875340261d77f67f63c95 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
-@@ -128,7 +128,7 @@ public class EntityStorage implements EntityPersistentStorage {
+@@ -117,7 +117,7 @@ public class EntityStorage implements EntityPersistentStorage {
private CompoundTag upgradeChunkTag(CompoundTag chunkNbt) {
int i = getVersion(chunkNbt);
@@ -22700,10 +22700,10 @@ index de7afc737b1ab099edc29a4ef94baa76329c2947..2bc0384728f89b7c64a8beec78a1b77d
public static int getVersion(CompoundTag chunkNbt) {
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
-index bb59cff9ba570923a40c1612d5812a64390454ee..10e8d1e36639cca21aa451e81cdab90ba9e9a496 100644
+index 8a4750dd8f604062c4ea452f7b97b05a0c8d583a..a0b61647e5a7e5989aed52522bc9a43bc487421c 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
-@@ -148,7 +148,14 @@ public class SectionStorage extends RegionFileStorage implements AutoCloseabl
+@@ -142,7 +142,14 @@ public class SectionStorage implements AutoCloseable {
int j = getVersion(dynamic);
int k = SharedConstants.getCurrentVersion().getWorldVersion();
boolean bl = j != k;
@@ -22733,7 +22733,7 @@ index 963ad3ce1ef83888ae1537ff01accdbb5b04ffa1..a7cba5b828a586d7435bda4d512af686
LOGGER.warn("Failed to partially datafix chunk {}", pos, var12);
return StructureCheckResult.CHUNK_LOAD_NEEDED;
diff --git a/src/main/java/net/minecraft/world/level/storage/PlayerDataStorage.java b/src/main/java/net/minecraft/world/level/storage/PlayerDataStorage.java
-index d785efd61caa2237e05d9ce3dbf84d86076ff047..601f8099f74e81c17600566b3c9b7a6dd39c9bcb 100644
+index 86fb11e9e197357871d603c4f8ce778660d507cf..bf4c895794c2bc2ad65faa128c6fa92cb0656841 100644
--- a/src/main/java/net/minecraft/world/level/storage/PlayerDataStorage.java
+++ b/src/main/java/net/minecraft/world/level/storage/PlayerDataStorage.java
@@ -93,7 +93,7 @@ public class PlayerDataStorage {
diff --git a/patches/server/0788-Rewrite-the-light-engine.patch b/patches/server/0014-Starlight.patch
similarity index 94%
rename from patches/server/0788-Rewrite-the-light-engine.patch
rename to patches/server/0014-Starlight.patch
index 1be70f40a4..182c80449e 100644
--- a/patches/server/0788-Rewrite-the-light-engine.patch
+++ b/patches/server/0014-Starlight.patch
@@ -1,40 +1,9 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Spottedleaf
+From: Spottedleaf
Date: Wed, 28 Oct 2020 16:51:55 -0700
-Subject: [PATCH] Rewrite the light engine
+Subject: [PATCH] Starlight
-The standard vanilla light engine is plagued by
-awful performance. Paper's changes to the light engine
-help a bit, however they appear to cause some lighting
-errors - most easily noticed in coral generation.
-
-The vanilla light engine's is too abstract to be modified -
-so an entirely new implementation is required to fix the
-performance and lighting errors.
-
-The new implementation is designed primarily to optimise
-light level propagations (increase and decrease). Unlike
-the vanilla light engine, this implementation tracks more
-information per queued value when performing a
-breadth first search. Vanilla just tracks coordinate, which
-means every time they handle a queued value, they must
-also determine the coordinate's target light level
-from its neighbours - very wasteful, especially considering
-these checks read neighbour block data.
-The new light engine tracks both position and target level,
-as well as whether the target block needs to be read at all
-(for checking sided propagation). So, the work done per coordinate
-is significantly reduced because no work is done for calculating
-the target level.
-In my testing, the block get calls were reduced by approximately
-an order of magnitude. However, the light read checks were only
-reduced by approximately 2x - but this is fine, light read checks
-are extremely cheap compared to block gets.
-
-Generation testing showed that the new light engine improved
-total generation (not lighting itself, but the whole generation process)
-by 2x. According to cpu time, the light engine itself spent 10x less time
-lighting chunks for generation.
+See https://github.com/PaperMC/Starlight
diff --git a/src/main/java/ca/spottedleaf/starlight/common/light/BlockStarLightEngine.java b/src/main/java/ca/spottedleaf/starlight/common/light/BlockStarLightEngine.java
new file mode 100644
@@ -4357,53 +4326,99 @@ index 0000000000000000000000000000000000000000..dd995e25ae620ae36cd5eecb2fe10ad0
+ }
+
+}
-diff --git a/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java b/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java
-index 190df802cb24aa360f6cf4d291e38b4b3fe4a2ac..68645bbbab9b4225048b647252d8f462028a9c84 100644
---- a/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java
-+++ b/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java
-@@ -10,6 +10,7 @@ import net.minecraft.server.level.ServerLevel;
- import net.minecraft.server.level.ServerPlayer;
- import net.minecraft.server.level.ThreadedLevelLightEngine;
- import net.minecraft.world.level.ChunkPos;
-+import net.minecraft.world.level.chunk.ChunkAccess;
- import net.minecraft.world.level.chunk.LevelChunk;
- import org.bukkit.command.CommandSender;
- import org.bukkit.craftbukkit.entity.CraftPlayer;
-@@ -19,6 +20,8 @@ import org.checkerframework.checker.nullness.qual.Nullable;
- import org.checkerframework.framework.qual.DefaultQualifier;
+diff --git a/src/main/java/io/papermc/paper/command/PaperCommand.java b/src/main/java/io/papermc/paper/command/PaperCommand.java
+index b3a58bf4b654e336826dc04da9e2f80ff8b9a9a7..c9a2ac696f7cefc8b0715f53db3fc541f26b62f6 100644
+--- a/src/main/java/io/papermc/paper/command/PaperCommand.java
++++ b/src/main/java/io/papermc/paper/command/PaperCommand.java
+@@ -1,6 +1,7 @@
+ package io.papermc.paper.command;
- import static net.kyori.adventure.text.Component.text;
+ import io.papermc.paper.command.subcommands.EntityCommand;
++import io.papermc.paper.command.subcommands.FixLightCommand;
+ import io.papermc.paper.command.subcommands.HeapDumpCommand;
+ import io.papermc.paper.command.subcommands.ReloadCommand;
+ import io.papermc.paper.command.subcommands.VersionCommand;
+@@ -40,6 +41,7 @@ public final class PaperCommand extends Command {
+ commands.put(Set.of("entity"), new EntityCommand());
+ commands.put(Set.of("reload"), new ReloadCommand());
+ commands.put(Set.of("version"), new VersionCommand());
++ commands.put(Set.of("fixlight"), new FixLightCommand());
+
+ return commands.entrySet().stream()
+ .flatMap(entry -> entry.getKey().stream().map(s -> Map.entry(s, entry.getValue())))
+diff --git a/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java b/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..450bd95218852174cfbc88d4517e17daee5ffd5f
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/command/subcommands/FixLightCommand.java
+@@ -0,0 +1,115 @@
++package io.papermc.paper.command.subcommands;
++
++import io.papermc.paper.command.PaperSubcommand;
++import java.util.ArrayDeque;
++import java.util.Deque;
++import net.minecraft.server.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.ServerPlayer;
++import net.minecraft.server.level.ThreadedLevelLightEngine;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.LevelChunk;
++import org.bukkit.command.CommandSender;
++import org.bukkit.craftbukkit.entity.CraftPlayer;
++import org.bukkit.entity.Player;
++import org.checkerframework.checker.nullness.qual.NonNull;
++import org.checkerframework.checker.nullness.qual.Nullable;
++import org.checkerframework.framework.qual.DefaultQualifier;
++
++import static net.kyori.adventure.text.Component.text;
+import static net.kyori.adventure.text.format.NamedTextColor.BLUE;
+import static net.kyori.adventure.text.format.NamedTextColor.DARK_AQUA;
- import static net.kyori.adventure.text.format.NamedTextColor.GREEN;
- import static net.kyori.adventure.text.format.NamedTextColor.RED;
-
-@@ -44,7 +47,7 @@ public final class FixLightCommand implements PaperSubcommand {
- sender.sendMessage(text("Radius cannot be negative!", RED));
- return;
- }
-- final int maxRadius = 5;
-+ final int maxRadius = 32; // Paper - MOOOOOORE
- radius = Math.min(maxRadius, parsed);
- if (radius != parsed) {
- post = () -> sender.sendMessage(text("Radius '" + parsed + "' was not in the required range [0, " + maxRadius + "], it was lowered to the maximum (" + maxRadius + " chunks).", RED));
-@@ -59,12 +62,67 @@ public final class FixLightCommand implements PaperSubcommand {
- ServerPlayer handle = player.getHandle();
- ServerLevel world = (ServerLevel) handle.level;
- ThreadedLevelLightEngine lightengine = world.getChunkSource().getLightEngine();
-+ // Paper start - rewrite light engine
-+ if (true) {
-+ this.starlightFixLight(handle, world, lightengine, radius, post);
++import static net.kyori.adventure.text.format.NamedTextColor.GREEN;
++import static net.kyori.adventure.text.format.NamedTextColor.RED;
++
++@DefaultQualifier(NonNull.class)
++public final class FixLightCommand implements PaperSubcommand {
++ @Override
++ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
++ this.doFixLight(sender, args);
++ return true;
++ }
++
++ private void doFixLight(final CommandSender sender, final String[] args) {
++ if (!(sender instanceof Player)) {
++ sender.sendMessage(text("Only players can use this command", RED));
+ return;
+ }
-+ // Paper end - rewrite light engine
-
- net.minecraft.core.BlockPos center = MCUtil.toBlockPosition(player.getLocation());
- Deque queue = new ArrayDeque<>(MCUtil.getSpiralOutChunks(center, radius));
- updateLight(sender, world, lightengine, queue, post);
- }
-
-+ // Paper start - rewrite light engine
++ @Nullable Runnable post = null;
++ int radius = 2;
++ if (args.length > 0) {
++ try {
++ final int parsed = Integer.parseInt(args[0]);
++ if (parsed < 0) {
++ sender.sendMessage(text("Radius cannot be negative!", RED));
++ return;
++ }
++ final int maxRadius = 32;
++ radius = Math.min(maxRadius, parsed);
++ if (radius != parsed) {
++ post = () -> sender.sendMessage(text("Radius '" + parsed + "' was not in the required range [0, " + maxRadius + "], it was lowered to the maximum (" + maxRadius + " chunks).", RED));
++ }
++ } catch (final Exception e) {
++ sender.sendMessage(text("'" + args[0] + "' is not a valid number.", RED));
++ return;
++ }
++ }
++
++ CraftPlayer player = (CraftPlayer) sender;
++ ServerPlayer handle = player.getHandle();
++ ServerLevel world = (ServerLevel) handle.level;
++ ThreadedLevelLightEngine lightengine = world.getChunkSource().getLightEngine();
++ this.starlightFixLight(handle, world, lightengine, radius, post);
++ }
++
+ private void starlightFixLight(
+ final ServerPlayer sender,
+ final ServerLevel world,
@@ -4411,10 +4426,10 @@ index 190df802cb24aa360f6cf4d291e38b4b3fe4a2ac..68645bbbab9b4225048b647252d8f462
+ final int radius,
+ final @Nullable Runnable done
+ ) {
-+ long start = System.nanoTime();
-+ java.util.LinkedHashSet chunks = new java.util.LinkedHashSet<>(MCUtil.getSpiralOutChunks(sender.blockPosition(), radius)); // getChunkCoordinates is actually just bad mappings, this function rets position as blockpos
++ final long start = System.nanoTime();
++ final java.util.LinkedHashSet chunks = new java.util.LinkedHashSet<>(MCUtil.getSpiralOutChunks(sender.blockPosition(), radius)); // getChunkCoordinates is actually just bad mappings, this function rets position as blockpos
+
-+ int[] pending = new int[1];
++ final int[] pending = new int[1];
+ for (java.util.Iterator iterator = chunks.iterator(); iterator.hasNext(); ) {
+ final ChunkPos chunkPos = iterator.next();
+
@@ -4428,16 +4443,16 @@ index 190df802cb24aa360f6cf4d291e38b4b3fe4a2ac..68645bbbab9b4225048b647252d8f462
+ ++pending[0];
+ }
+
-+ int[] relitChunks = new int[1];
++ final int[] relitChunks = new int[1];
+ lightengine.relight(chunks,
-+ (ChunkPos chunkPos) -> {
++ (final ChunkPos chunkPos) -> {
+ ++relitChunks[0];
+ sender.getBukkitEntity().sendMessage(text().color(DARK_AQUA).append(
+ text("Relit chunk ", BLUE), text(chunkPos.toString()),
+ text(", progress: ", BLUE), text((int) (Math.round(100.0 * (double) (relitChunks[0]) / (double) pending[0])) + "%")
+ ));
+ },
-+ (int totalRelit) -> {
++ (final int totalRelit) -> {
+ final long end = System.nanoTime();
+ final long diff = Math.round(1.0e-6 * (end - start));
+ sender.getBukkitEntity().sendMessage(text().color(DARK_AQUA).append(
@@ -4447,16 +4462,13 @@ index 190df802cb24aa360f6cf4d291e38b4b3fe4a2ac..68645bbbab9b4225048b647252d8f462
+ if (done != null) {
+ done.run();
+ }
-+ });
++ }
++ );
+ sender.getBukkitEntity().sendMessage(text().color(BLUE).append(text("Relighting "), text(pending[0], DARK_AQUA), text(" chunks")));
+ }
-+ // Paper end - rewrite light engine
-+
- private void updateLight(
- final CommandSender sender,
- final ServerLevel world,
++}
diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java
-index 09f262de1b12b09013f8277b25d13ffcf53b96d8..73712d6b9c828427d4c066c6d8672534575f3793 100644
+index 0873134f1f6de0c372ba28b89a20302c9a0115d8..86c33f029ae56fcace51b69763202be9f8bc5f44 100644
--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java
+++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java
@@ -55,7 +55,7 @@ public class ChunkHolder {
@@ -4469,33 +4481,33 @@ index 09f262de1b12b09013f8277b25d13ffcf53b96d8..73712d6b9c828427d4c066c6d8672534
private final DebugBuffer chunkToSaveHistory;
public int oldTicketLevel;
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
-index 69b8f5dcae4ea75ea9d63c36b3f5b4383fe232f9..fe10c770b511fa8a38ece2bf9679492a85b28eff 100644
+index 2a9e5fb8164f79b0f9c1cb5497216e51f9df3454..cbd4e749574c55c6e52f42b62dd6da8cfcca97f9 100644
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
-@@ -133,7 +133,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+@@ -128,7 +128,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
public final LongSet entitiesInLevel;
public final ServerLevel level;
private final ThreadedLevelLightEngine lightEngine;
- private final BlockableEventLoop mainThreadExecutor;
+ public final BlockableEventLoop mainThreadExecutor; // Paper - public
- final java.util.concurrent.Executor mainInvokingExecutor; // Paper
public ChunkGenerator generator;
private RandomState randomState;
+ public final Supplier overworldDataStorage;
diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java
-index 537d34a0325a985948c744929b90144a66a35ee3..06e4d3a02e0d1326b7029157856476db4ef3575e 100644
+index fbe62a31ab199d83a1db0a4e0b1a813824e6f2c2..d38ad1b1eee92a6dbd2b79b4fcdb8959cdb4007d 100644
--- a/src/main/java/net/minecraft/server/level/DistanceManager.java
+++ b/src/main/java/net/minecraft/server/level/DistanceManager.java
-@@ -545,7 +545,7 @@ public abstract class DistanceManager {
+@@ -390,7 +390,7 @@ public abstract class DistanceManager {
}
public void removeTicketsOnClosing() {
-- ImmutableSet> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT, TicketType.ASYNC_LOAD, TicketType.REQUIRED_LOAD); // Paper - add additional tickets to preserve
-+ ImmutableSet> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT, TicketType.ASYNC_LOAD, TicketType.REQUIRED_LOAD, TicketType.CHUNK_RELIGHT, ca.spottedleaf.starlight.common.light.StarLightInterface.CHUNK_WORK_TICKET); // Paper - add additional tickets to preserve
+- ImmutableSet> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT); // Paper - add additional tickets to preserve
++ ImmutableSet> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT, TicketType.CHUNK_RELIGHT, ca.spottedleaf.starlight.common.light.StarLightInterface.CHUNK_WORK_TICKET); // Paper - add additional tickets to preserve
ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().fastIterator();
while (objectiterator.hasNext()) {
diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
-index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d0b82456e 100644
+index 5b238e41ffa3e374b52ee955cb39087571c6ffc2..89f3380632b098aaf95d68a386bc7e72c8c27f5c 100644
--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
+++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
@@ -23,6 +23,17 @@ import net.minecraft.world.level.chunk.LightChunkGetter;
@@ -4516,7 +4528,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCloseable {
private static final Logger LOGGER = LogUtils.getLogger();
private final ProcessorMailbox taskMailbox;
-@@ -157,13 +168,168 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -32,13 +43,168 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
private volatile int taskPerBatch = 5;
private final AtomicBoolean scheduled = new AtomicBoolean();
@@ -4529,7 +4541,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
public ThreadedLevelLightEngine(LightChunkGetter chunkProvider, ChunkMap chunkStorage, boolean hasBlockLight, ProcessorMailbox processor, ProcessorHandle> executor) {
- super(chunkProvider, true, hasBlockLight);
+ super(chunkProvider, false, false); // Paper - destroy vanilla light engine state
- this.chunkMap = chunkStorage; this.playerChunkMap = chunkMap; // Paper
+ this.chunkMap = chunkStorage;
this.sorterMailbox = executor;
this.taskMailbox = processor;
+ // Paper start - replace light engine impl
@@ -4539,7 +4551,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
+ // Paper end - replace light engine impl
+ }
+
-+// Paper start - replace light engine impl
++ // Paper start - replace light engine impl
+ protected final ChunkAccess getChunk(final int chunkX, final int chunkZ) {
+ return ((ServerLevel)this.theLightEngine.getWorld()).getChunkSource().getChunkAtImmediately(chunkX, chunkZ);
+ }
@@ -4664,7 +4676,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
+ @Override
+ public boolean hasLightWork() {
+ // route to new light engine
-+ return this.theLightEngine.hasUpdates() || !this.queue.isEmpty();
++ return this.theLightEngine.hasUpdates();
}
+ @Override
@@ -4686,7 +4698,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
@Override
public void close() {
}
-@@ -180,15 +346,16 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -55,15 +221,16 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
@Override
public void checkBlock(BlockPos pos) {
@@ -4709,7 +4721,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
this.addTask(pos.x, pos.z, () -> {
return 0;
}, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
-@@ -211,17 +378,16 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -86,17 +253,16 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
@Override
public void updateSectionStatus(SectionPos pos, boolean notReady) {
@@ -4733,7 +4745,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
this.addTask(pos.x, pos.z, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
super.enableLightSources(pos, retainData);
}, () -> {
-@@ -231,6 +397,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -106,6 +272,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
@Override
public void queueSectionData(LightLayer lightType, SectionPos pos, @Nullable DataLayer nibbles, boolean nonEdge) {
@@ -4741,7 +4753,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
this.addTask(pos.x(), pos.z(), () -> {
return 0;
}, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
-@@ -252,6 +419,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -131,6 +298,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
@Override
public void retainData(ChunkPos pos, boolean retainData) {
@@ -4749,7 +4761,7 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
this.addTask(pos.x, pos.z, () -> {
return 0;
}, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
-@@ -274,6 +442,37 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+@@ -153,6 +321,37 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
}
public CompletableFuture lightChunk(ChunkAccess chunk, boolean excludeBlocks) {
@@ -4785,61 +4797,55 @@ index 5539f2a7e069cbe98997b734f3b1cd498148f09b..b57bffce30154b196b879209c1ce559d
+ }
+ // Paper end - replace light engine impl
ChunkPos chunkPos = chunk.getPos();
- // Paper start
- //ichunkaccess.b(false); // Don't need to disable this
-@@ -316,7 +515,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
+ chunk.setLightCorrect(false);
+ this.addTask(chunkPos.x, chunkPos.z, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
+@@ -187,7 +386,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
}
public void tryScheduleUpdate() {
-- if ((!this.queue.isEmpty() || super.hasLightWork()) && this.scheduled.compareAndSet(false, true)) { // Paper
+- if ((!this.lightTasks.isEmpty() || super.hasLightWork()) && this.scheduled.compareAndSet(false, true)) {
+ if (this.hasLightWork() && this.scheduled.compareAndSet(false, true)) { // Paper // Paper - rewrite light engine
this.taskMailbox.tell(() -> {
this.runUpdate();
this.scheduled.set(false);
-@@ -333,12 +532,12 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
- if (queue.poll(pre, post)) {
- pre.forEach(Runnable::run);
- pre.clear();
-- super.runUpdates(Integer.MAX_VALUE, true, true);
-+ this.theLightEngine.propagateChanges(); // Paper - rewrite light engine
- post.forEach(Runnable::run);
- post.clear();
- } else {
- // might have level updates to go still
-- super.runUpdates(Integer.MAX_VALUE, true, true);
-+ this.theLightEngine.propagateChanges(); // Paper - rewrite light engine
+@@ -209,7 +408,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
}
- // Paper end
- }
+
+ objectListIterator.back(j);
+- super.runUpdates(Integer.MAX_VALUE, true, true);
++ this.theLightEngine.propagateChanges(); // Paper - rewrite light engine
+
+ for(int var5 = 0; objectListIterator.hasNext() && var5 < i; ++var5) {
+ Pair pair2 = objectListIterator.next();
diff --git a/src/main/java/net/minecraft/server/level/TicketType.java b/src/main/java/net/minecraft/server/level/TicketType.java
-index 41ddcf6775f99c56cf4b13b284420061e5dd6bdc..ae46429264e6a7e5c88b6b6a41a6df4db7b3e70d 100644
+index 0d536d72ac918fbd403397ff369d10143ee9c204..6051e5f272838ef23276a90e21c2fc821ca155d1 100644
--- a/src/main/java/net/minecraft/server/level/TicketType.java
+++ b/src/main/java/net/minecraft/server/level/TicketType.java
-@@ -32,6 +32,7 @@ public class TicketType {
+@@ -26,6 +26,7 @@ public class TicketType {
+ public static final TicketType UNKNOWN = TicketType.create("unknown", Comparator.comparingLong(ChunkPos::toLong), 1);
+ public static final TicketType PLUGIN = TicketType.create("plugin", (a, b) -> 0); // CraftBukkit
public static final TicketType PLUGIN_TICKET = TicketType.create("plugin_ticket", (plugin1, plugin2) -> plugin1.getClass().getName().compareTo(plugin2.getClass().getName())); // CraftBukkit
- public static final TicketType DELAY_UNLOAD = create("delay_unload", Long::compareTo, 300); // Paper
- public static final TicketType REQUIRED_LOAD = create("required_load", Long::compareTo); // Paper - make sure getChunkAt does not fail
+ public static final TicketType CHUNK_RELIGHT = create("light_update", Long::compareTo); // Paper - ensure chunks stay loaded for lighting
public static TicketType create(String name, Comparator argumentComparator) {
return new TicketType<>(name, argumentComparator, 0L);
diff --git a/src/main/java/net/minecraft/world/level/block/state/BlockBehaviour.java b/src/main/java/net/minecraft/world/level/block/state/BlockBehaviour.java
-index 178d9ad7525b6743038ed45c6f85686a860ffd26..24b820484497714eb8be87e07ca1d37829d4f2c9 100644
+index f0bd06ab32e99c188510b3c3fa41f1737ab4fe78..51ac731cf49e6d2cd574e48f26c4b151e9014826 100644
--- a/src/main/java/net/minecraft/world/level/block/state/BlockBehaviour.java
+++ b/src/main/java/net/minecraft/world/level/block/state/BlockBehaviour.java
-@@ -701,6 +701,7 @@ public abstract class BlockBehaviour {
+@@ -694,6 +694,7 @@ public abstract class BlockBehaviour {
this.hasPostProcess = blockbase_info.hasPostProcess;
this.emissiveRendering = blockbase_info.emissiveRendering;
this.offsetType = (BlockBehaviour.OffsetType) blockbase_info.offsetType.apply(this.asState());
+ this.conditionallyFullOpaque = this.isOpaque() & this.isTransparentOnSomeFaces(); // Paper
}
- // Paper start - impl cached craft block data, lazy load to fix issue with loading at the wrong time
- private org.bukkit.craftbukkit.block.data.CraftBlockData cachedCraftBlockData;
-@@ -721,6 +722,18 @@ public abstract class BlockBehaviour {
- protected boolean isTicking;
- protected FluidState fluid;
+
+ // Paper start
+@@ -702,12 +703,25 @@ public abstract class BlockBehaviour {
+ return this.shapeExceedsCube;
+ }
// Paper end
-+ // Paper start
++ // Paper start - starlight
+ protected int opacityIfCached = -1;
+ // ret -1 if opacity is dynamic, or -1 if the block is conditionally full opaque, else return opacity in [0, 15]
+ public final int getOpacityIfCached() {
@@ -4850,20 +4856,19 @@ index 178d9ad7525b6743038ed45c6f85686a860ffd26..24b820484497714eb8be87e07ca1d378
+ public final boolean isConditionallyFullOpaque() {
+ return this.conditionallyFullOpaque;
+ }
-+ // Paper end
++ // Paper end - starlight
public void initCache() {
- this.fluid = this.getBlock().getFluidState(this.asState()); // Paper - moved from getFluid()
-@@ -729,6 +742,7 @@ public abstract class BlockBehaviour {
+ if (!this.getBlock().hasDynamicShape()) {
this.cache = new BlockBehaviour.BlockStateBase.Cache(this.asState());
}
this.shapeExceedsCube = this.cache == null || this.cache.largeCollisionShape; // Paper - moved from actual method to here
-+ this.opacityIfCached = this.cache == null || this.isConditionallyFullOpaque() ? -1 : this.cache.lightBlock; // Paper - cache opacity for light
++ this.opacityIfCached = this.cache == null || this.isConditionallyFullOpaque() ? -1 : this.cache.lightBlock; // Paper - starlight - cache opacity for light
}
diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkAccess.java b/src/main/java/net/minecraft/world/level/chunk/ChunkAccess.java
-index a97909e77b9b28aede8c8716831c3f9a90618f09..b68625ebb32b8d1e5bc232d7cc791edbed923378 100644
+index fabc7df600c89b01d97a76eb0b1206a32407b906..0e787d877901dfcea714b0e14e9fc4358ee30bbe 100644
--- a/src/main/java/net/minecraft/world/level/chunk/ChunkAccess.java
+++ b/src/main/java/net/minecraft/world/level/chunk/ChunkAccess.java
@@ -81,6 +81,47 @@ public abstract class ChunkAccess implements BlockGetter, BiomeManager.NoiseBiom
@@ -4914,14 +4919,37 @@ index a97909e77b9b28aede8c8716831c3f9a90618f09..b68625ebb32b8d1e5bc232d7cc791edb
public ChunkAccess(ChunkPos pos, UpgradeData upgradeData, LevelHeightAccessor heightLimitView, Registry biome, long inhabitedTime, @Nullable LevelChunkSection[] sectionArrayInitializer, @Nullable BlendingData blendingData) {
this.locX = pos.x; this.locZ = pos.z; // Paper - reduce need for field lookups
+diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
+index 441d46635caedfae3cb2f46d30b8d9ae95636e7b..e6240f891e396d91e31b02fdf3084be77e9d6697 100644
+--- a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
++++ b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
+@@ -292,6 +292,17 @@ public class ChunkStatus {
+ return this.chunkType;
+ }
+
++ // Paper start
++ public static ChunkStatus getStatus(String name) {
++ try {
++ // We need this otherwise we return EMPTY for invalid names
++ ResourceLocation key = new ResourceLocation(name);
++ return Registry.CHUNK_STATUS.getOptional(key).orElse(null);
++ } catch (Exception ex) {
++ return null; // invalid name
++ }
++ }
++ // Paper end
+ public static ChunkStatus byName(String id) {
+ return (ChunkStatus) Registry.CHUNK_STATUS.get(ResourceLocation.tryParse(id));
+ }
diff --git a/src/main/java/net/minecraft/world/level/chunk/EmptyLevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/EmptyLevelChunk.java
-index a9c65c8d36e5c7080133706df1363b3ce52e3370..d1b175f2bb1bc96e4f044a97b14721feb44d78f5 100644
+index 80e383e9a2d12f9f1b0b0d9ae71a0add9b51c9d4..a78bf00d4559dd99869d93ec78b3525d24331925 100644
--- a/src/main/java/net/minecraft/world/level/chunk/EmptyLevelChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/EmptyLevelChunk.java
-@@ -21,6 +21,38 @@ public class EmptyLevelChunk extends LevelChunk {
+@@ -21,6 +21,40 @@ public class EmptyLevelChunk extends LevelChunk {
this.biome = holder;
}
++ // Paper start - starlight
+ @Override
+ public ca.spottedleaf.starlight.common.light.SWMRNibbleArray[] getBlockNibbles() {
+ return ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(this.getLevel());
@@ -4953,12 +4981,13 @@ index a9c65c8d36e5c7080133706df1363b3ce52e3370..d1b175f2bb1bc96e4f044a97b14721fe
+
+ @Override
+ public void setBlockEmptinessMap(final boolean[] emptinessMap) {}
++ // Paper end - starlight
+
- // Paper start
@Override
- public BlockState getBlockState(int x, int y, int z) {
+ public BlockState getBlockState(BlockPos pos) {
+ return Blocks.VOID_AIR.defaultBlockState();
diff --git a/src/main/java/net/minecraft/world/level/chunk/ImposterProtoChunk.java b/src/main/java/net/minecraft/world/level/chunk/ImposterProtoChunk.java
-index 7b320357973202423c29743d922b72dc4ec11efe..8ffc206a858864d277ff94de7c66ffdb07d8f491 100644
+index 3dff0f7c3ccd04a67b2153e402d801de2341e520..ac5dff35e2df23b8790bbe65c40acc6a3c77e6ac 100644
--- a/src/main/java/net/minecraft/world/level/chunk/ImposterProtoChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/ImposterProtoChunk.java
@@ -31,6 +31,48 @@ public class ImposterProtoChunk extends ProtoChunk {
@@ -5011,13 +5040,13 @@ index 7b320357973202423c29743d922b72dc4ec11efe..8ffc206a858864d277ff94de7c66ffdb
super(wrapped.getPos(), UpgradeData.EMPTY, wrapped.levelHeightAccessor, wrapped.getLevel().registryAccess().registryOrThrow(Registry.BIOME_REGISTRY), wrapped.getBlendingData());
this.wrapped = wrapped;
diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
-index c85380c3bf3bf4448a28a91af78f41c235a583e4..d870cefbe5b7485f423817f4f639e3e2a304640c 100644
+index b322d9b7bd9e107a9adf995b6c4db4ff0af05fc1..e75ec8f6aa597b5f3048d6269fba45eef057bc71 100644
--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
-@@ -100,6 +100,10 @@ public class LevelChunk extends ChunkAccess {
+@@ -93,6 +93,10 @@ public class LevelChunk extends ChunkAccess {
public LevelChunk(Level world, ChunkPos pos, UpgradeData upgradeData, LevelChunkTicks blockTickScheduler, LevelChunkTicks fluidTickScheduler, long inhabitedTime, @Nullable LevelChunkSection[] sectionArrayInitializer, @Nullable LevelChunk.PostLoadProcessor entityLoader, @Nullable BlendingData blendingData) {
- super(pos, upgradeData, world, net.minecraft.server.MinecraftServer.getServer().registryAccess().registryOrThrow(Registry.BIOME_REGISTRY), inhabitedTime, sectionArrayInitializer, blendingData); // Paper - Anti-Xray - The world isn't ready yet, use server singleton for registry
+ super(pos, upgradeData, world, world.registryAccess().registryOrThrow(Registry.BIOME_REGISTRY), inhabitedTime, sectionArrayInitializer, blendingData);
+ // Paper start - rewrite light engine
+ this.setBlockNibbles(ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world));
+ this.setSkyNibbles(ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world));
@@ -5025,7 +5054,7 @@ index c85380c3bf3bf4448a28a91af78f41c235a583e4..d870cefbe5b7485f423817f4f639e3e2
this.tickersInLevel = Maps.newHashMap();
this.clientLightReady = false;
this.level = (ServerLevel) world; // CraftBukkit - type
-@@ -330,6 +334,12 @@ public class LevelChunk extends ChunkAccess {
+@@ -230,6 +234,12 @@ public class LevelChunk extends ChunkAccess {
public LevelChunk(ServerLevel world, ProtoChunk protoChunk, @Nullable LevelChunk.PostLoadProcessor entityLoader) {
this(world, protoChunk.getPos(), protoChunk.getUpgradeData(), protoChunk.unpackBlockTicks(), protoChunk.unpackFluidTicks(), protoChunk.getInhabitedTime(), protoChunk.getSections(), entityLoader, protoChunk.getBlendingData());
@@ -5039,10 +5068,10 @@ index c85380c3bf3bf4448a28a91af78f41c235a583e4..d870cefbe5b7485f423817f4f639e3e2
while (iterator.hasNext()) {
diff --git a/src/main/java/net/minecraft/world/level/chunk/PalettedContainer.java b/src/main/java/net/minecraft/world/level/chunk/PalettedContainer.java
-index 5ebde3a4f99b8d017d9a10a30fefc0b7dd011319..7908360dd47937b2cb702e381802b7b278a5198e 100644
+index 78e20871e4bd8d92c4475f797a55733c68f6aeb4..33eecdac9d844af2f70aad97c4788b138dab8896 100644
--- a/src/main/java/net/minecraft/world/level/chunk/PalettedContainer.java
+++ b/src/main/java/net/minecraft/world/level/chunk/PalettedContainer.java
-@@ -203,7 +203,7 @@ public class PalettedContainer implements PaletteResize, PalettedContainer
+@@ -142,7 +142,7 @@ public class PalettedContainer implements PaletteResize, PalettedContainer
return this.get(this.strategy.getIndex(x, y, z));
}
@@ -5052,7 +5081,7 @@ index 5ebde3a4f99b8d017d9a10a30fefc0b7dd011319..7908360dd47937b2cb702e381802b7b2
return data.palette.valueFor(data.storage.get(index));
}
diff --git a/src/main/java/net/minecraft/world/level/chunk/ProtoChunk.java b/src/main/java/net/minecraft/world/level/chunk/ProtoChunk.java
-index 9014331e4ceac9f77a911aead87bf452d29e3fb4..13b62e8e6569c154547bc0d5626488c5b0839f20 100644
+index 603111a52346f678aba0fd66b010d8f3026fce40..040c6092ceed4c693a7a056c0d1a49d3d2242b19 100644
--- a/src/main/java/net/minecraft/world/level/chunk/ProtoChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/ProtoChunk.java
@@ -55,6 +55,12 @@ public class ProtoChunk extends ChunkAccess {
@@ -5069,10 +5098,10 @@ index 9014331e4ceac9f77a911aead87bf452d29e3fb4..13b62e8e6569c154547bc0d5626488c5
this.fluidTicks = fluidTickScheduler;
}
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
-index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096643a2ab8 100644
+index 864e2e0355a5fb8c1d4a5b0896ba299faf9ea534..2dead743775df9b261bdcdca30df9b672c6acc8b 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
-@@ -94,6 +94,14 @@ public class ChunkSerializer {
+@@ -82,6 +82,14 @@ public class ChunkSerializer {
public static final String BLOCK_LIGHT_TAG = "BlockLight";
public static final String SKY_LIGHT_TAG = "SkyLight";
@@ -5086,13 +5115,13 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
+
public ChunkSerializer() {}
- // Paper start - guard against serializing mismatching coordinates
-@@ -153,13 +161,20 @@ public class ChunkSerializer {
+ public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) {
+@@ -92,13 +100,20 @@ public class ChunkSerializer {
}
UpgradeData chunkconverter = nbt.contains("UpgradeData", 10) ? new UpgradeData(nbt.getCompound("UpgradeData"), world) : UpgradeData.EMPTY;
- boolean flag = nbt.getBoolean("isLightOn");
-+ boolean flag = getStatus(nbt).isOrAfter(ChunkStatus.LIGHT) && nbt.get("isLightOn") != null && nbt.getInt(STARLIGHT_VERSION_TAG) == STARLIGHT_LIGHT_VERSION; // Paper
++ boolean flag = getStatus(nbt) != null && getStatus(nbt).isOrAfter(ChunkStatus.LIGHT) && nbt.get("isLightOn") != null && nbt.getInt(STARLIGHT_VERSION_TAG) == STARLIGHT_LIGHT_VERSION; // Paper
ListTag nbttaglist = nbt.getList("sections", 10);
int i = world.getSectionsCount();
LevelChunkSection[] achunksection = new LevelChunkSection[i];
@@ -5100,8 +5129,8 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
ServerChunkCache chunkproviderserver = world.getChunkSource();
LevelLightEngine lightengine = chunkproviderserver.getLightEngine();
+ // Paper start
-+ ca.spottedleaf.starlight.common.light.SWMRNibbleArray[] blockNibbles = ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world); // Paper - replace light impl
-+ ca.spottedleaf.starlight.common.light.SWMRNibbleArray[] skyNibbles = ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world); // Paper - replace light impl
++ ca.spottedleaf.starlight.common.light.SWMRNibbleArray[] blockNibbles = ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world);
++ ca.spottedleaf.starlight.common.light.SWMRNibbleArray[] skyNibbles = ca.spottedleaf.starlight.common.light.StarLightEngine.getFilledEmptyLight(world);
+ final int minSection = io.papermc.paper.util.WorldUtil.getMinLightSection(world);
+ final int maxSection = io.papermc.paper.util.WorldUtil.getMaxLightSection(world);
+ boolean canReadSky = world.dimensionType().hasSkyLight();
@@ -5109,7 +5138,7 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
Registry iregistry = world.registryAccess().registryOrThrow(Registry.BIOME_REGISTRY);
Codec>> codec = ChunkSerializer.makeBiomeCodecRW(iregistry); // CraftBukkit - read/write
boolean flag2 = false;
-@@ -167,7 +182,7 @@ public class ChunkSerializer {
+@@ -106,7 +121,7 @@ public class ChunkSerializer {
DataResult dataresult;
for (int j = 0; j < nbttaglist.size(); ++j) {
@@ -5118,46 +5147,33 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
byte b0 = nbttagcompound1.getByte("Y");
int k = world.getSectionIndexFromSectionY(b0);
-@@ -214,31 +229,45 @@ public class ChunkSerializer {
+@@ -147,19 +162,39 @@ public class ChunkSerializer {
boolean flag3 = nbttagcompound1.contains("BlockLight", 7);
boolean flag4 = flag1 && nbttagcompound1.contains("SkyLight", 7);
- if (flag3 || flag4) {
- if (!flag2) {
+- lightengine.retainData(chunkPos, true);
+- flag2 = true;
+- }
+-
+ // Paper start - rewrite the light engine
+ if (flag) {
+ try {
-+ if ((flag3 || flag4) && !flag2) {
-+ // Paper end - rewrite the light engine
- tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main
- lightengine.retainData(chunkPos, true);
- }); // Paper - delay this task since we're executing off-main
- flag2 = true;
- }
-
+ int y = sectionData.getByte("Y");
++ // Paper end - rewrite the light engine
if (flag3) {
-- // Paper start - delay this task since we're executing off-main
-- DataLayer blockLight = new DataLayer(nbttagcompound1.getByteArray("BlockLight").clone());
-- tasksToExecuteOnMain.add(() -> {
-- lightengine.queueSectionData(LightLayer.BLOCK, SectionPos.of(chunkPos, b0), blockLight, true);
-- });
-- // Paper end - delay this task since we're executing off-main
+- lightengine.queueSectionData(LightLayer.BLOCK, SectionPos.of(chunkPos, b0), new DataLayer(nbttagcompound1.getByteArray("BlockLight")), true);
+ // Paper start - rewrite the light engine
+ // this is where our diff is
+ blockNibbles[y - minSection] = new ca.spottedleaf.starlight.common.light.SWMRNibbleArray(sectionData.getByteArray("BlockLight").clone(), sectionData.getInt(BLOCKLIGHT_STATE_TAG)); // clone for data safety
+ } else {
+ blockNibbles[y - minSection] = new ca.spottedleaf.starlight.common.light.SWMRNibbleArray(null, sectionData.getInt(BLOCKLIGHT_STATE_TAG));
++ // Paper end - rewrite the light engine
}
-+ // Paper end - rewrite the light engine
if (flag4) {
-- // Paper start - delay this task since we're executing off-main
-- DataLayer skyLight = new DataLayer(nbttagcompound1.getByteArray("SkyLight").clone());
-- tasksToExecuteOnMain.add(() -> {
-- lightengine.queueSectionData(LightLayer.SKY, SectionPos.of(chunkPos, b0), skyLight, true);
-- });
-- // Paper end - delay this task since we're executing off-mai
+- lightengine.queueSectionData(LightLayer.SKY, SectionPos.of(chunkPos, b0), new DataLayer(nbttagcompound1.getByteArray("SkyLight")), true);
+ // Paper start - rewrite the light engine
+ // we store under the same key so mod programs editing nbt
+ // can still read the data, hopefully.
@@ -5167,18 +5183,19 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
+ skyNibbles[y - minSection] = new ca.spottedleaf.starlight.common.light.SWMRNibbleArray(sectionData.getByteArray("SkyLight").clone(), sectionData.getInt(SKYLIGHT_STATE_TAG)); // clone for data safety
+ } else if (flag1) {
+ skyNibbles[y - minSection] = new ca.spottedleaf.starlight.common.light.SWMRNibbleArray(null, sectionData.getInt(SKYLIGHT_STATE_TAG));
- }
-+ // Paper end - rewrite the light engine
++ // Paper end - rewrite the light engine
++ }
++
+ // Paper start - rewrite the light engine
+ } catch (Exception ex) {
+ LOGGER.warn("Failed to load light data for chunk " + chunkPos + " in world '" + world.getWorld().getName() + "', light will be regenerated", ex);
+ flag = false;
-+ }
+ }
+ // Paper end - rewrite the light engine
}
}
-@@ -267,6 +296,8 @@ public class ChunkSerializer {
+@@ -188,6 +223,8 @@ public class ChunkSerializer {
}, chunkPos);
object1 = new LevelChunk(world.getLevel(), chunkPos, chunkconverter, levelchunkticks, levelchunkticks1, l, achunksection, ChunkSerializer.postLoadChunk(world, nbt), blendingdata);
@@ -5187,7 +5204,7 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
} else {
ProtoChunkTicks protochunkticklist = ProtoChunkTicks.load(nbt.getList("block_ticks", 10), (s) -> {
return Registry.BLOCK.getOptional(ResourceLocation.tryParse(s));
-@@ -275,6 +306,8 @@ public class ChunkSerializer {
+@@ -196,6 +233,8 @@ public class ChunkSerializer {
return Registry.FLUID.getOptional(ResourceLocation.tryParse(s));
}, chunkPos);
ProtoChunk protochunk = new ProtoChunk(chunkPos, chunkconverter, achunksection, protochunkticklist, protochunkticklist1, world, iregistry, blendingdata);
@@ -5196,19 +5213,10 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
object1 = protochunk;
protochunk.setInhabitedTime(l);
-@@ -420,7 +453,7 @@ public class ChunkSerializer {
- DataLayer[] blockLight = new DataLayer[lightenginethreaded.getMaxLightSection() - lightenginethreaded.getMinLightSection()];
- DataLayer[] skyLight = new DataLayer[lightenginethreaded.getMaxLightSection() - lightenginethreaded.getMinLightSection()];
+@@ -336,6 +375,12 @@ public class ChunkSerializer {
+ // CraftBukkit end
-- for (int i = lightenginethreaded.getMinLightSection(); i < lightenginethreaded.getMaxLightSection(); ++i) {
-+ for (int i = lightenginethreaded.getMinLightSection(); false && i < lightenginethreaded.getMaxLightSection(); ++i) { // Paper - don't run loop, we don't need to - light data is per chunk now
- DataLayer blockArray = lightenginethreaded.getLayerListener(LightLayer.BLOCK).getDataLayerData(SectionPos.of(chunkPos, i));
- DataLayer skyArray = lightenginethreaded.getLayerListener(LightLayer.SKY).getDataLayerData(SectionPos.of(chunkPos, i));
-
-@@ -478,6 +511,12 @@ public class ChunkSerializer {
- }
- public static CompoundTag saveChunk(ServerLevel world, ChunkAccess chunk, @org.checkerframework.checker.nullness.qual.Nullable AsyncSaveData asyncsavedata) {
- // Paper end
+ public static CompoundTag write(ServerLevel world, ChunkAccess chunk) {
+ // Paper start - rewrite light impl
+ final int minSection = io.papermc.paper.util.WorldUtil.getMinLightSection(world);
+ final int maxSection = io.papermc.paper.util.WorldUtil.getMaxLightSection(world);
@@ -5218,21 +5226,12 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
ChunkPos chunkcoordintpair = chunk.getPos();
CompoundTag nbttagcompound = new CompoundTag();
-@@ -528,20 +567,14 @@ public class ChunkSerializer {
+@@ -386,11 +431,14 @@ public class ChunkSerializer {
for (int i = lightenginethreaded.getMinLightSection(); i < lightenginethreaded.getMaxLightSection(); ++i) {
int j = chunk.getSectionIndexFromSectionY(i);
boolean flag1 = j >= 0 && j < achunksection.length;
-- // Paper start - async chunk save for unload
-- DataLayer nibblearray; // block light
-- DataLayer nibblearray1; // sky light
-- if (asyncsavedata == null) {
-- nibblearray = lightenginethreaded.getLayerListener(LightLayer.BLOCK).getDataLayerData(SectionPos.of(chunkcoordintpair, i)); /// Paper - diff on method change (see getAsyncSaveData)
-- nibblearray1 = lightenginethreaded.getLayerListener(LightLayer.SKY).getDataLayerData(SectionPos.of(chunkcoordintpair, i)); // Paper - diff on method change (see getAsyncSaveData)
-- } else {
-- nibblearray = asyncsavedata.blockLight[i - lightenginethreaded.getMinLightSection()];
-- nibblearray1 = asyncsavedata.skyLight[i - lightenginethreaded.getMinLightSection()];
-- }
-- // Paper end
+- DataLayer nibblearray = lightenginethreaded.getLayerListener(LightLayer.BLOCK).getDataLayerData(SectionPos.of(chunkcoordintpair, i));
+- DataLayer nibblearray1 = lightenginethreaded.getLayerListener(LightLayer.SKY).getDataLayerData(SectionPos.of(chunkcoordintpair, i));
+ // Paper - replace light engine
- if (flag1 || nibblearray != null || nibblearray1 != null) {
@@ -5246,7 +5245,7 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
if (flag1) {
LevelChunkSection chunksection = achunksection[j];
-@@ -556,13 +589,27 @@ public class ChunkSerializer {
+@@ -405,13 +453,27 @@ public class ChunkSerializer {
nbttagcompound1.put("biomes", (Tag) dataresult1.getOrThrow(false, logger1::error));
}
@@ -5278,7 +5277,7 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
if (!nbttagcompound1.isEmpty()) {
nbttagcompound1.putByte("Y", (byte) i);
-@@ -573,7 +620,8 @@ public class ChunkSerializer {
+@@ -422,7 +484,8 @@ public class ChunkSerializer {
nbttagcompound.put("sections", nbttaglist);
if (flag) {
@@ -5287,4 +5286,22 @@ index be9c15fe141ede1132dbe07ba4bfcf22036ab194..4df5853781a2ac89dd391374d34d9096
+ nbttagcompound.putBoolean("isLightOn", false); // Paper - set to false but still store, this allows us to detect --eraseCache (as eraseCache _removes_)
}
- // Paper start
+ ListTag nbttaglist1 = new ListTag();
+@@ -497,6 +560,17 @@ public class ChunkSerializer {
+ }));
+ }
+
++ // Paper start
++ public static @Nullable ChunkStatus getStatus(@Nullable CompoundTag compound) {
++ if (compound == null) {
++ return null;
++ }
++
++ // Note: Copied from below
++ return ChunkStatus.getStatus(compound.getString("Status"));
++ }
++ // Paper end
++
+ public static ChunkStatus.ChunkType getChunkTypeFromTag(@Nullable CompoundTag nbt) {
+ return nbt != null ? ChunkStatus.byName(nbt.getString("Status")).getChunkType() : ChunkStatus.ChunkType.PROTOCHUNK;
+ }
diff --git a/patches/server/0013-Not-implemeneted.patch b/patches/server/0015-Not-implemeneted.patch
similarity index 100%
rename from patches/server/0013-Not-implemeneted.patch
rename to patches/server/0015-Not-implemeneted.patch
diff --git a/patches/server/0016-Rewrite-chunk-system.patch b/patches/server/0016-Rewrite-chunk-system.patch
new file mode 100644
index 0000000000..3f2585cc65
--- /dev/null
+++ b/patches/server/0016-Rewrite-chunk-system.patch
@@ -0,0 +1,18056 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf
+Date: Thu, 11 Mar 2021 02:32:30 -0800
+Subject: [PATCH] Rewrite chunk system
+
+
+diff --git a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
+index ef8dcbb6bbc0769e9ccfdadb05e6a46c070eda98..f6dfaaa0ccd8caeb4bd4b94254aebe7e96732f12 100644
+--- a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
++++ b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
+@@ -41,14 +41,14 @@ public final class StarLightInterface {
+ protected final ArrayDeque cachedSkyPropagators;
+ protected final ArrayDeque cachedBlockPropagators;
+
+- protected final LightQueue lightQueue = new LightQueue(this);
++ public final io.papermc.paper.chunk.system.light.LightQueue lightQueue; // Paper - replace light queue
+
+ protected final LayerLightEventListener skyReader;
+ protected final LayerLightEventListener blockReader;
+ protected final boolean isClientSide;
+
+- protected final int minSection;
+- protected final int maxSection;
++ public final int minSection; // Paper - public
++ public final int maxSection; // Paper - public
+ protected final int minLightSection;
+ protected final int maxLightSection;
+
+@@ -182,6 +182,7 @@ public final class StarLightInterface {
+ StarLightInterface.this.sectionChange(pos, notReady);
+ }
+ };
++ this.lightQueue = new io.papermc.paper.chunk.system.light.LightQueue(this); // Paper - replace light queue
+ }
+
+ protected int getSkyLightValue(final BlockPos blockPos, final ChunkAccess chunk) {
+@@ -325,7 +326,7 @@ public final class StarLightInterface {
+ return this.lightAccess;
+ }
+
+- protected final SkyStarLightEngine getSkyLightEngine() {
++ public final SkyStarLightEngine getSkyLightEngine() { // Paper - public
+ if (this.cachedSkyPropagators == null) {
+ return null;
+ }
+@@ -340,7 +341,7 @@ public final class StarLightInterface {
+ return ret;
+ }
+
+- protected final void releaseSkyLightEngine(final SkyStarLightEngine engine) {
++ public final void releaseSkyLightEngine(final SkyStarLightEngine engine) { // Paper - public
+ if (this.cachedSkyPropagators == null) {
+ return;
+ }
+@@ -349,7 +350,7 @@ public final class StarLightInterface {
+ }
+ }
+
+- protected final BlockStarLightEngine getBlockLightEngine() {
++ public final BlockStarLightEngine getBlockLightEngine() { // Paper - public
+ if (this.cachedBlockPropagators == null) {
+ return null;
+ }
+@@ -364,7 +365,7 @@ public final class StarLightInterface {
+ return ret;
+ }
+
+- protected final void releaseBlockLightEngine(final BlockStarLightEngine engine) {
++ public final void releaseBlockLightEngine(final BlockStarLightEngine engine) { // Paper - public
+ if (this.cachedBlockPropagators == null) {
+ return;
+ }
+@@ -511,57 +512,15 @@ public final class StarLightInterface {
+ }
+
+ public void scheduleChunkLight(final ChunkPos pos, final Runnable run) {
+- this.lightQueue.queueChunkLighting(pos, run);
++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
+ }
+
+ public void removeChunkTasks(final ChunkPos pos) {
+- this.lightQueue.removeChunk(pos);
++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
+ }
+
+ public void propagateChanges() {
+- if (this.lightQueue.isEmpty()) {
+- return;
+- }
+-
+- final SkyStarLightEngine skyEngine = this.getSkyLightEngine();
+- final BlockStarLightEngine blockEngine = this.getBlockLightEngine();
+-
+- try {
+- LightQueue.ChunkTasks task;
+- while ((task = this.lightQueue.removeFirstTask()) != null) {
+- if (task.lightTasks != null) {
+- for (final Runnable run : task.lightTasks) {
+- run.run();
+- }
+- }
+-
+- final long coordinate = task.chunkCoordinate;
+- final int chunkX = CoordinateUtils.getChunkX(coordinate);
+- final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
+-
+- final Set positions = task.changedPositions;
+- final Boolean[] sectionChanges = task.changedSectionSet;
+-
+- if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+- skyEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
+- }
+- if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+- blockEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
+- }
+-
+- if (skyEngine != null && task.queuedEdgeChecksSky != null) {
+- skyEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksSky);
+- }
+- if (blockEngine != null && task.queuedEdgeChecksBlock != null) {
+- blockEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksBlock);
+- }
+-
+- task.onComplete.complete(null);
+- }
+- } finally {
+- this.releaseSkyLightEngine(skyEngine);
+- this.releaseBlockLightEngine(blockEngine);
+- }
++ throw new UnsupportedOperationException("No longer implemented, task draining is now performed by the light thread"); // Paper - replace light queue
+ }
+
+ protected static final class LightQueue {
+diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java
+index 46297ac0a19fd2398ab777a381eff4d0a256161e..98171f6c8e23f6ef89b897e4b80e3afb2a1950a0 100644
+--- a/src/main/java/co/aikar/timings/TimingsExport.java
++++ b/src/main/java/co/aikar/timings/TimingsExport.java
+@@ -162,7 +162,11 @@ public class TimingsExport extends Thread {
+ pair("gamerules", toObjectMapper(world.getWorld().getGameRules(), rule -> {
+ return pair(rule, world.getWorld().getGameRuleValue(rule));
+ })),
+- pair("ticking-distance", world.getChunkSource().chunkMap.getEffectiveViewDistance())
++ // Paper start - replace chunk loader system
++ pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()),
++ pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()),
++ pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())
++ // Paper end - replace chunk loader system
+ ));
+ }));
+
+diff --git a/src/main/java/co/aikar/timings/WorldTimingsHandler.java b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
+index 0fda52841b5e1643efeda92106124998abc4e0aa..fe79c0add4f7cb18d487c5bb9415c40c5b551ea2 100644
+--- a/src/main/java/co/aikar/timings/WorldTimingsHandler.java
++++ b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
+@@ -58,6 +58,16 @@ public class WorldTimingsHandler {
+
+ public final Timing miscMobSpawning;
+
++ public final Timing poiUnload;
++ public final Timing chunkUnload;
++ public final Timing poiSaveDataSerialization;
++ public final Timing chunkSave;
++ public final Timing chunkSaveDataSerialization;
++ public final Timing chunkSaveIOWait;
++ public final Timing chunkUnloadPrepareSave;
++ public final Timing chunkUnloadPOISerialization;
++ public final Timing chunkUnloadDataSave;
++
+ public WorldTimingsHandler(Level server) {
+ String name = ((PrimaryLevelData) server.getLevelData()).getLevelName() + " - ";
+
+@@ -111,6 +121,16 @@ public class WorldTimingsHandler {
+
+
+ miscMobSpawning = Timings.ofSafe(name + "Mob spawning - Misc");
++
++ poiUnload = Timings.ofSafe(name + "Chunk unload - POI");
++ chunkUnload = Timings.ofSafe(name + "Chunk unload - Chunk");
++ poiSaveDataSerialization = Timings.ofSafe(name + "Chunk save - POI Data serialization");
++ chunkSave = Timings.ofSafe(name + "Chunk save - Chunk");
++ chunkSaveDataSerialization = Timings.ofSafe(name + "Chunk save - Chunk Data serialization");
++ chunkSaveIOWait = Timings.ofSafe(name + "Chunk save - Chunk IO Wait");
++ chunkUnloadPrepareSave = Timings.ofSafe(name + "Chunk unload - Async Save Prepare");
++ chunkUnloadPOISerialization = Timings.ofSafe(name + "Chunk unload - POI Data Serialization");
++ chunkUnloadDataSave = Timings.ofSafe(name + "Chunk unload - Data Serialization");
+ }
+
+ public static Timing getTickList(ServerLevel worldserver, String timingsType) {
+diff --git a/src/main/java/com/destroystokyo/paper/io/IOUtil.java b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..e064f96c90afd1a4890060baa055cfd0469b6a6f
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
+@@ -0,0 +1,63 @@
++package com.destroystokyo.paper.io;
++
++import org.bukkit.Bukkit;
++
++@Deprecated(forRemoval = true)
++public final class IOUtil {
++
++ /* Copied from concrete or concurrentutil */
++
++ public static long getCoordinateKey(final int x, final int z) {
++ return ((long)z << 32) | (x & 0xFFFFFFFFL);
++ }
++
++ public static int getCoordinateX(final long key) {
++ return (int)key;
++ }
++
++ public static int getCoordinateZ(final long key) {
++ return (int)(key >>> 32);
++ }
++
++ public static int getRegionCoordinate(final int chunkCoordinate) {
++ return chunkCoordinate >> 5;
++ }
++
++ public static int getChunkInRegion(final int chunkCoordinate) {
++ return chunkCoordinate & 31;
++ }
++
++ public static String genericToString(final Object object) {
++ return object == null ? "null" : object.getClass().getName() + ":" + object.toString();
++ }
++
++ public static T notNull(final T obj) {
++ if (obj == null) {
++ throw new NullPointerException();
++ }
++ return obj;
++ }
++
++ public static T notNull(final T obj, final String msgIfNull) {
++ if (obj == null) {
++ throw new NullPointerException(msgIfNull);
++ }
++ return obj;
++ }
++
++ public static void arrayBounds(final int off, final int len, final int arrayLength, final String msgPrefix) {
++ if (off < 0 || len < 0 || (arrayLength - off) < len) {
++ throw new ArrayIndexOutOfBoundsException(msgPrefix + ": off: " + off + ", len: " + len + ", array length: " + arrayLength);
++ }
++ }
++
++ public static int getPriorityForCurrentThread() {
++ return Bukkit.isPrimaryThread() ? PrioritizedTaskQueue.HIGHEST_PRIORITY : PrioritizedTaskQueue.NORMAL_PRIORITY;
++ }
++
++ @SuppressWarnings("unchecked")
++ public static void rethrow(final Throwable throwable) throws T {
++ throw (T)throwable;
++ }
++
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..f2c27e0ac65be4b75c1d86ef6fd45fdb538d96ac
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
+@@ -0,0 +1,474 @@
++package com.destroystokyo.paper.io;
++
++import com.mojang.logging.LogUtils;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.storage.RegionFile;
++import org.slf4j.Logger;
++
++import java.io.IOException;
++import java.util.concurrent.CompletableFuture;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.function.Consumer;
++import java.util.function.Function;
++
++/**
++ * Prioritized singleton thread responsible for all chunk IO that occurs in a minecraft server.
++ *
++ *
++ * Singleton access: {@link Holder#INSTANCE}
++ *
++ *
++ *
++ * All functions provided are MT-Safe, however certain ordering constraints are required (but not enforced):
++ *
++ * Chunk saves may not occur for unloaded chunks.
++ *
++ *
++ * Tasks must be scheduled on the main thread.
++ *
++ *
++ *
++ * @see Holder#INSTANCE
++ * @see #scheduleSave(ServerLevel, int, int, CompoundTag, CompoundTag, int)
++ * @see #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)
++ * @deprecated
++ */
++@Deprecated(forRemoval = true)
++public final class PaperFileIOThread extends QueueExecutorThread {
++
++ public static final Logger LOGGER = LogUtils.getLogger();
++ public static final CompoundTag FAILURE_VALUE = new CompoundTag();
++
++ public static final class Holder {
++
++ public static final PaperFileIOThread INSTANCE = new PaperFileIOThread();
++
++ static {
++ // Paper - fail hard on usage
++ }
++ }
++
++ private final AtomicLong writeCounter = new AtomicLong();
++
++ private PaperFileIOThread() {
++ super(new PrioritizedTaskQueue<>(), (int)(1.0e6)); // 1.0ms spinwait time
++ this.setName("Paper RegionFile IO Thread");
++ this.setPriority(Thread.NORM_PRIORITY - 1); // we keep priority close to normal because threads can wait on us
++ this.setUncaughtExceptionHandler((final Thread unused, final Throwable thr) -> {
++ LOGGER.error("Uncaught exception thrown from IO thread, report this!", thr);
++ });
++ }
++
++ /* run() is implemented by superclass */
++
++ /*
++ *
++ * IO thread will perform reads before writes
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read in progress while scheduling write, ignore read and schedule write
++ * If read in progress while scheduling read (no write in progress), chain the read task
++ *
++ *
++ * If write in progress while scheduling read, use the pending write data and ret immediately
++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
++ * it fails to properly propagate write failures. When writes fail the data is kept so future reads will actually
++ * read the failed write data. This should hopefully act as a way to prevent data loss for spurious fails for writing data.
++ *
++ */
++
++ /**
++ * Attempts to bump the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level to try to bump to
++ */
++ public void bumpPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ public CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final boolean poiData) {
++ // Paper start - rewrite chunk system
++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.getPendingWrite(
++ world, chunkX, chunkZ, poiData ? io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA :
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA
++ );
++ // Paper end - rewrite chunk system
++ }
++
++ /**
++ * Sets the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level to set to
++ */
++ public void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * Impl notes:
++ *
++ *
++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.
++ *
++ *
++ * Writes may be called concurrently, although only the "later" write will go through.
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param poiData Chunk point of interest data. If {@code null}, then no poi data is saved.
++ * @param chunkData Chunk data. If {@code null}, then no chunk data is saved.
++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
++ * @throws IllegalArgumentException If both {@code poiData} and {@code chunkData} are {@code null}.
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ,
++ final CompoundTag poiData, final CompoundTag chunkData,
++ final int priority) throws IllegalArgumentException {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ private void scheduleWrite(final ChunkDataController dataController, final ServerLevel world,
++ final int chunkX, final int chunkZ, final CompoundTag data, final int priority, final long writeCounter) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
++ * a {@link CompletableFuture} which is potentially completed ASYNCHRONOUSLY ON THE FILE IO THREAD when the load task
++ * has completed.
++ *
++ * Note that if the chunk fails to load the returned future is completed with {@code null}.
++ *
++ */
++ public CompletableFuture loadChunkDataAsyncFuture(final ServerLevel world, final int chunkX, final int chunkZ,
++ final int priority, final boolean readPoiData, final boolean readChunkData,
++ final boolean intendingToBlock) {
++ final CompletableFuture future = new CompletableFuture<>();
++ this.loadChunkDataAsync(world, chunkX, chunkZ, priority, future::complete, readPoiData, readChunkData, intendingToBlock);
++ return future;
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously.
++ *
++ * Impl notes:
++ *
++ *
++ * If a chunk fails to load, the {@code onComplete} parameter is completed with {@code null}.
++ *
++ *
++ * It is possible for the {@code onComplete} parameter to be given {@link ChunkData} containing data
++ * this call did not request.
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
++ * @param onComplete Consumer to execute once this task has completed
++ * @param readPoiData Whether to read point of interest data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
++ * @param readChunkData Whether to read chunk data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
++ * @return The {@link PrioritizedTaskQueue.PrioritizedTask} associated with this task. Note that this task does not support
++ * cancellation.
++ */
++ public void loadChunkDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++ final int priority, final Consumer onComplete,
++ final boolean readPoiData, final boolean readChunkData,
++ final boolean intendingToBlock) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority: " + priority);
++ }
++
++ if (!(readPoiData | readChunkData)) {
++ throw new IllegalArgumentException("Must read chunk data or poi data");
++ }
++
++ final ChunkData complete = new ChunkData();
++ // Paper start - rewrite chunk system
++ final java.util.List types = new java.util.ArrayList<>();
++ if (readPoiData) {
++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
++ }
++ if (readChunkData) {
++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ final ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority newPriority;
++ switch (priority) {
++ case PrioritizedTaskQueue.HIGHEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING;
++ case PrioritizedTaskQueue.HIGHER_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHEST;
++ case PrioritizedTaskQueue.HIGH_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGH;
++ case PrioritizedTaskQueue.NORMAL_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL;
++ case PrioritizedTaskQueue.LOW_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.LOW;
++ case PrioritizedTaskQueue.LOWEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.IDLE;
++ default -> throw new IllegalStateException("Legacy priority " + priority + " should be valid");
++ }
++ final Consumer transformComplete = (io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData data) -> {
++ if (readPoiData) {
++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) != null) {
++ complete.poiData = FAILURE_VALUE;
++ } else {
++ complete.poiData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
++ }
++ }
++
++ if (readChunkData) {
++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) != null) {
++ complete.chunkData = FAILURE_VALUE;
++ } else {
++ complete.chunkData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ }
++
++ onComplete.accept(complete);
++ };
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, transformComplete, intendingToBlock, newPriority, types.toArray(new io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType[0]));
++ // Paper end - rewrite chunk system
++
++ }
++
++ // Note: the onComplete may be called asynchronously or synchronously here.
++ private void scheduleRead(final ChunkDataController dataController, final ServerLevel world,
++ final int chunkX, final int chunkZ, final Consumer onComplete, final int priority,
++ final boolean intendingToBlock) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ /**
++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
++ * the {@link ChunkData} associated with the specified chunk when the task is complete.
++ * @return The chunk data, or {@code null} if the chunk failed to load.
++ */
++ public ChunkData loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, final int priority,
++ final boolean readPoiData, final boolean readChunkData) {
++ return this.loadChunkDataAsyncFuture(world, chunkX, chunkZ, priority, readPoiData, readChunkData, true).join();
++ }
++
++ /**
++ * Schedules the given task at the specified priority to be executed on the IO thread.
++ *
++ * Internal api. Do not use.
++ *
++ */
++ public void runTask(final int priority, final Runnable runnable) {
++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ }
++
++ static final class GeneralTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
++
++ private final Runnable run;
++
++ public GeneralTask(final int priority, final Runnable run) {
++ super(priority);
++ this.run = IOUtil.notNull(run, "Task may not be null");
++ }
++
++ @Override
++ public void run() {
++ try {
++ this.run.run();
++ } catch (final Throwable throwable) {
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ LOGGER.error("Failed to execute general task on IO thread " + IOUtil.genericToString(this.run), throwable);
++ }
++ }
++ }
++
++ public static final class ChunkData {
++
++ public CompoundTag poiData;
++ public CompoundTag chunkData;
++
++ public ChunkData() {}
++
++ public ChunkData(final CompoundTag poiData, final CompoundTag chunkData) {
++ this.poiData = poiData;
++ this.chunkData = chunkData;
++ }
++ }
++
++ public static abstract class ChunkDataController {
++
++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding.
++ public final ConcurrentHashMap tasks = new ConcurrentHashMap<>(64, 0.5f);
++
++ public abstract void writeData(final int x, final int z, final CompoundTag compound) throws IOException;
++ public abstract CompoundTag readData(final int x, final int z) throws IOException;
++
++ public abstract T computeForRegionFile(final int chunkX, final int chunkZ, final Function function);
++ public abstract T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function function);
++
++ public static final class InProgressWrite {
++ public long writeCounter;
++ public CompoundTag data;
++ }
++
++ public static final class InProgressRead {
++ public final CompletableFuture readFuture = new CompletableFuture<>();
++ }
++ }
++
++ public static final class ChunkDataTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
++
++ public ChunkDataController.InProgressWrite inProgressWrite;
++ public ChunkDataController.InProgressRead inProgressRead;
++
++ private final ServerLevel world;
++ private final int x;
++ private final int z;
++ private final ChunkDataController taskController;
++
++ public ChunkDataTask(final int priority, final ServerLevel world, final int x, final int z, final ChunkDataController taskController) {
++ super(priority);
++ this.world = world;
++ this.x = x;
++ this.z = z;
++ this.taskController = taskController;
++ }
++
++ @Override
++ public String toString() {
++ return "Task for world: '" + this.world.getWorld().getName() + "' at " + this.x + "," + this.z +
++ " poi: " + (this.taskController == null) + ", hash: " + this.hashCode(); // Paper - TODO rewrite chunk system
++ }
++
++ /*
++ *
++ * IO thread will perform reads before writes
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read in progress while scheduling write, ignore read and schedule write
++ * If read in progress while scheduling read (no write in progress), chain the read task
++ *
++ *
++ * If write in progress while scheduling read, use the pending write data and ret immediately
++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
++ * it fails to properly propagate write failures
++ *
++ */
++
++ void reschedule(final int priority) {
++ // priority is checked before this stage // TODO what
++ this.queue.lazySet(null);
++ this.priority.lazySet(priority);
++ PaperFileIOThread.Holder.INSTANCE.queueTask(this);
++ }
++
++ @Override
++ public void run() {
++ if (true) throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
++ ChunkDataController.InProgressRead read = this.inProgressRead;
++ if (read != null) {
++ CompoundTag compound = PaperFileIOThread.FAILURE_VALUE;
++ try {
++ compound = this.taskController.readData(this.x, this.z);
++ } catch (final Throwable thr) {
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
++ // fall through to complete with null data
++ }
++ read.readFuture.complete(compound);
++ }
++
++ final Long chunkKey = Long.valueOf(IOUtil.getCoordinateKey(this.x, this.z));
++
++ ChunkDataController.InProgressWrite write = this.inProgressWrite;
++
++ if (write == null) {
++ // IntelliJ warns this is invalid, however it does not consider that writes to the task map & the inProgress field can occur concurrently.
++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ return valueInMap.inProgressWrite == null ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ return; // set the task value to null, indicating we're done
++ }
++
++ // not null, which means there was a concurrent write
++ write = this.inProgressWrite;
++ }
++
++ for (;;) {
++ final long writeCounter;
++ final CompoundTag data;
++
++ //noinspection SynchronizationOnLocalVariableOrMethodParameter
++ synchronized (write) {
++ writeCounter = write.writeCounter;
++ data = write.data;
++ }
++
++ boolean failedWrite = false;
++
++ try {
++ this.taskController.writeData(this.x, this.z, data);
++ } catch (final Throwable thr) {
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
++ failedWrite = true;
++ }
++
++ boolean finalFailWrite = failedWrite;
++
++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ if (valueInMap.inProgressWrite.writeCounter == writeCounter) {
++ if (finalFailWrite) {
++ valueInMap.inProgressWrite.writeCounter = -1L;
++ }
++
++ return null;
++ }
++ return valueInMap;
++ // Hack end
++ });
++
++ if (inMap == null) {
++ // write counter matched, so we wrote the most up-to-date pending data, we're done here
++ // or we failed to write and successfully set the write counter to -1
++ return; // we're done here
++ }
++
++ // fetch & write new data
++ continue;
++ }
++ }
++ }
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..7844a3515430472bd829ff246396bceb0797de1b
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
+@@ -0,0 +1,299 @@
++package com.destroystokyo.paper.io;
++
++import java.util.concurrent.ConcurrentLinkedQueue;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.atomic.AtomicReference;
++
++@Deprecated(forRemoval = true)
++public class PrioritizedTaskQueue {
++
++ // lower numbers are a higher priority (except < 0)
++ // higher priorities are always executed before lower priorities
++
++ /**
++ * Priority value indicating the task has completed or is being completed.
++ */
++ public static final int COMPLETING_PRIORITY = -1;
++
++ /**
++ * Highest priority, should only be used for main thread tasks or tasks that are blocking the main thread.
++ */
++ public static final int HIGHEST_PRIORITY = 0;
++
++ /**
++ * Should be only used in an IO task so that chunk loads do not wait on other IO tasks.
++ * This only exists because IO tasks are scheduled before chunk load tasks to decrease IO waiting times.
++ */
++ public static final int HIGHER_PRIORITY = 1;
++
++ /**
++ * Should be used for scheduling chunk loads/generation that would increase response times to users.
++ */
++ public static final int HIGH_PRIORITY = 2;
++
++ /**
++ * Default priority.
++ */
++ public static final int NORMAL_PRIORITY = 3;
++
++ /**
++ * Use for tasks not at all critical and can potentially be delayed.
++ */
++ public static final int LOW_PRIORITY = 4;
++
++ /**
++ * Use for tasks that should "eventually" execute.
++ */
++ public static final int LOWEST_PRIORITY = 5;
++
++ private static final int TOTAL_PRIORITIES = 6;
++
++ final ConcurrentLinkedQueue[] queues = (ConcurrentLinkedQueue[])new ConcurrentLinkedQueue[TOTAL_PRIORITIES];
++
++ private final AtomicBoolean shutdown = new AtomicBoolean();
++
++ {
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ this.queues[i] = new ConcurrentLinkedQueue<>();
++ }
++ }
++
++ /**
++ * Returns whether the specified priority is valid
++ */
++ public static boolean validPriority(final int priority) {
++ return priority >= 0 && priority < TOTAL_PRIORITIES;
++ }
++
++ /**
++ * Queues a task.
++ * @throws IllegalStateException If the task has already been queued. Use {@link PrioritizedTask#raisePriority(int)} to
++ * raise a task's priority.
++ * This can also be thrown if the queue has shutdown.
++ */
++ public void add(final T task) throws IllegalStateException {
++ int priority = task.getPriority();
++ if (priority != COMPLETING_PRIORITY) {
++ task.setQueue(this);
++ this.queues[priority].add(task);
++ }
++ if (this.shutdown.get()) {
++ // note: we're not actually sure at this point if our task will go through
++ throw new IllegalStateException("Queue has shutdown, refusing to execute task " + IOUtil.genericToString(task));
++ }
++ }
++
++ /**
++ * Polls the highest priority task currently available. {@code null} if none.
++ */
++ public T poll() {
++ T task;
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ final ConcurrentLinkedQueue queue = this.queues[i];
++
++ while ((task = queue.poll()) != null) {
++ final int prevPriority = task.tryComplete(i);
++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
++ // if the prev priority was greater-than or equal to our current priority
++ return task;
++ }
++ }
++ }
++
++ return null;
++ }
++
++ /**
++ * Polls the highest priority task currently available. {@code null} if none.
++ */
++ public T poll(final int lowestPriority) {
++ T task;
++ final int max = Math.min(LOWEST_PRIORITY, lowestPriority);
++ for (int i = 0; i <= max; ++i) {
++ final ConcurrentLinkedQueue queue = this.queues[i];
++
++ while ((task = queue.poll()) != null) {
++ final int prevPriority = task.tryComplete(i);
++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
++ // if the prev priority was greater-than or equal to our current priority
++ return task;
++ }
++ }
++ }
++
++ return null;
++ }
++
++ /**
++ * Returns whether this queue may have tasks queued.
++ *
++ * This operation is not atomic, but is MT-Safe.
++ *
++ * @return {@code true} if tasks may be queued, {@code false} otherwise
++ */
++ public boolean hasTasks() {
++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
++ final ConcurrentLinkedQueue queue = this.queues[i];
++
++ if (queue.peek() != null) {
++ return true;
++ }
++ }
++ return false;
++ }
++
++ /**
++ * Prevent further additions to this queue. Attempts to add after this call has completed (potentially during) will
++ * result in {@link IllegalStateException} being thrown.
++ *
++ * This operation is atomic with respect to other shutdown calls
++ *
++ *
++ * After this call has completed, regardless of return value, this queue will be shutdown.
++ *
++ * @return {@code true} if the queue was shutdown, {@code false} if it has shut down already
++ */
++ public boolean shutdown() {
++ return this.shutdown.getAndSet(false);
++ }
++
++ public abstract static class PrioritizedTask {
++
++ protected final AtomicReference queue = new AtomicReference<>();
++
++ protected final AtomicInteger priority;
++
++ protected PrioritizedTask() {
++ this(PrioritizedTaskQueue.NORMAL_PRIORITY);
++ }
++
++ protected PrioritizedTask(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.priority = new AtomicInteger(priority);
++ }
++
++ /**
++ * Returns the current priority. Note that {@link PrioritizedTaskQueue#COMPLETING_PRIORITY} will be returned
++ * if this task is completing or has completed.
++ */
++ public final int getPriority() {
++ return this.priority.get();
++ }
++
++ /**
++ * Returns whether this task is scheduled to execute, or has been already executed.
++ */
++ public boolean isScheduled() {
++ return this.queue.get() != null;
++ }
++
++ final int tryComplete(final int minPriority) {
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return COMPLETING_PRIORITY;
++ }
++ if (curr > minPriority) {
++ // curr is lower priority
++ return curr;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, COMPLETING_PRIORITY))) {
++ return curr;
++ }
++ continue;
++ }
++ }
++
++ /**
++ * Forces this task to be completed.
++ * @return {@code true} if the task was cancelled, {@code false} if the task has already completed or is being completed.
++ */
++ public boolean cancel() {
++ return this.exchangePriorityVolatile(PrioritizedTaskQueue.COMPLETING_PRIORITY) != PrioritizedTaskQueue.COMPLETING_PRIORITY;
++ }
++
++ /**
++ * Attempts to raise the priority to the priority level specified.
++ * @param priority Priority specified
++ * @return {@code true} if successful, {@code false} otherwise.
++ */
++ public boolean raisePriority(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority");
++ }
++
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return false;
++ }
++ if (priority >= curr) {
++ return true;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
++ PrioritizedTaskQueue queue = this.queue.get();
++ if (queue != null) {
++ //noinspection unchecked
++ queue.queues[priority].add(this); // silently fail on shutdown
++ }
++ return true;
++ }
++ continue;
++ }
++ }
++
++ /**
++ * Attempts to set this task's priority level to the level specified.
++ * @param priority Specified priority level.
++ * @return {@code true} if successful, {@code false} if this task is completing or has completed.
++ */
++ public boolean updatePriority(final int priority) {
++ if (!PrioritizedTaskQueue.validPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority");
++ }
++
++ for (int curr = this.getPriorityVolatile();;) {
++ if (curr == COMPLETING_PRIORITY) {
++ return false;
++ }
++ if (curr == priority) {
++ return true;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
++ PrioritizedTaskQueue queue = this.queue.get();
++ if (queue != null) {
++ //noinspection unchecked
++ queue.queues[priority].add(this); // silently fail on shutdown
++ }
++ return true;
++ }
++ continue;
++ }
++ }
++
++ void setQueue(final PrioritizedTaskQueue queue) {
++ this.queue.set(queue);
++ }
++
++ /* priority */
++
++ protected final int getPriorityVolatile() {
++ return this.priority.get();
++ }
++
++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
++ if (this.priority.compareAndSet(expect, update)) {
++ return expect;
++ }
++ return this.priority.get();
++ }
++
++ protected final int exchangePriorityVolatile(final int value) {
++ return this.priority.getAndSet(value);
++ }
++ }
++}
+diff --git a/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..99f49b5625cf51d6c97640553cf5c420bb6fdd36
+--- /dev/null
++++ b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
+@@ -0,0 +1,255 @@
++package com.destroystokyo.paper.io;
++
++import com.mojang.logging.LogUtils;
++import org.slf4j.Logger;
++
++import java.util.concurrent.ConcurrentLinkedQueue;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.locks.LockSupport;
++
++@Deprecated(forRemoval = true)
++public class QueueExecutorThread extends Thread {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected final PrioritizedTaskQueue queue;
++ protected final long spinWaitTime;
++
++ protected volatile boolean closed;
++
++ protected final AtomicBoolean parked = new AtomicBoolean();
++
++ protected volatile ConcurrentLinkedQueue flushQueue = new ConcurrentLinkedQueue<>();
++ protected volatile long flushCycles;
++
++ protected int lowestPriorityToPoll = PrioritizedTaskQueue.LOWEST_PRIORITY;
++
++ public int getLowestPriorityToPoll() {
++ return this.lowestPriorityToPoll;
++ }
++
++ public void setLowestPriorityToPoll(final int lowestPriorityToPoll) {
++ if (this.isAlive()) {
++ throw new IllegalStateException("Cannot set after starting");
++ }
++ this.lowestPriorityToPoll = lowestPriorityToPoll;
++ }
++
++ public QueueExecutorThread(final PrioritizedTaskQueue queue) {
++ this(queue, (int)(1.e6)); // 1.0ms
++ }
++
++ public QueueExecutorThread(final PrioritizedTaskQueue queue, final long spinWaitTime) { // in ns
++ this.queue = queue;
++ this.spinWaitTime = spinWaitTime;
++ }
++
++ @Override
++ public void run() {
++ final long spinWaitTime = this.spinWaitTime;
++ main_loop:
++ for (;;) {
++ this.pollTasks(true);
++
++ // spinwait
++
++ final long start = System.nanoTime();
++
++ for (;;) {
++ // If we are interrupted for any reason, park() will always return immediately. Clear so that we don't needlessly use cpu in such an event.
++ Thread.interrupted();
++ LockSupport.parkNanos("Spinwaiting on tasks", 1000L); // 1us
++
++ if (this.pollTasks(true)) {
++ // restart loop, found tasks
++ continue main_loop;
++ }
++
++ if (this.handleClose()) {
++ return; // we're done
++ }
++
++ if ((System.nanoTime() - start) >= spinWaitTime) {
++ break;
++ }
++ }
++
++ if (this.handleClose()) {
++ return;
++ }
++
++ this.parked.set(true);
++
++ // We need to poll tasks here to avoid a race condition where a thread queues a task before we set parked to true
++ // (i.e it will not notify us)
++ if (this.pollTasks(true)) {
++ this.parked.set(false);
++ continue;
++ }
++
++ if (this.handleClose()) {
++ return;
++ }
++
++ // we don't need to check parked before sleeping, but we do need to check parked in a do-while loop
++ // LockSupport.park() can fail for any reason
++ do {
++ Thread.interrupted();
++ LockSupport.park("Waiting on tasks");
++ } while (this.parked.get());
++ }
++ }
++
++ protected boolean handleClose() {
++ if (this.closed) {
++ this.pollTasks(true); // this ensures we've emptied the queue
++ this.handleFlushThreads(true);
++ return true;
++ }
++ return false;
++ }
++
++ protected boolean pollTasks(boolean flushTasks) {
++ Runnable task;
++ boolean ret = false;
++
++ while ((task = this.queue.poll(this.lowestPriorityToPoll)) != null) {
++ ret = true;
++ try {
++ task.run();
++ } catch (final Throwable throwable) {
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ LOGGER.error("Exception thrown from prioritized runnable task in thread '" + this.getName() + "': " + IOUtil.genericToString(task), throwable);
++ }
++ }
++
++ if (flushTasks) {
++ this.handleFlushThreads(false);
++ }
++
++ return ret;
++ }
++
++ protected void handleFlushThreads(final boolean shutdown) {
++ Thread parking;
++ ConcurrentLinkedQueue flushQueue = this.flushQueue;
++ do {
++ ++flushCycles; // may be plain read opaque write
++ while ((parking = flushQueue.poll()) != null) {
++ LockSupport.unpark(parking);
++ }
++ } while (this.pollTasks(false));
++
++ if (shutdown) {
++ this.flushQueue = null;
++
++ // defend against a race condition where a flush thread double-checks right before we set to null
++ while ((parking = flushQueue.poll()) != null) {
++ LockSupport.unpark(parking);
++ }
++ }
++ }
++
++ /**
++ * Notifies this thread that a task has been added to its queue
++ * @return {@code true} if this thread was waiting for tasks, {@code false} if it is executing tasks
++ */
++ public boolean notifyTasks() {
++ if (this.parked.get() && this.parked.getAndSet(false)) {
++ LockSupport.unpark(this);
++ return true;
++ }
++ return false;
++ }
++
++ protected void queueTask(final T task) {
++ this.queue.add(task);
++ this.notifyTasks();
++ }
++
++ /**
++ * Waits until this thread's queue is empty.
++ *
++ * @throws IllegalStateException If the current thread is {@code this} thread.
++ */
++ public void flush() {
++ final Thread currentThread = Thread.currentThread();
++
++ if (currentThread == this) {
++ // avoid deadlock
++ throw new IllegalStateException("Cannot flush the queue executor thread while on the queue executor thread");
++ }
++
++ // order is important
++
++ int successes = 0;
++ long lastCycle = -1L;
++
++ do {
++ final ConcurrentLinkedQueue flushQueue = this.flushQueue;
++ if (flushQueue == null) {
++ return;
++ }
++
++ flushQueue.add(currentThread);
++
++ // double check flush queue
++ if (this.flushQueue == null) {
++ return;
++ }
++
++ final long currentCycle = this.flushCycles; // may be opaque read
++
++ if (currentCycle == lastCycle) {
++ Thread.yield();
++ continue;
++ }
++
++ // force response
++ this.parked.set(false);
++ LockSupport.unpark(this);
++
++ LockSupport.park("flushing queue executor thread");
++
++ // returns whether there are tasks queued, does not return whether there are tasks executing
++ // this is why we cycle twice through flush (we know a pollTask call is made after a flush cycle)
++ // we really only need to guarantee that the tasks this thread has queued has gone through, and can leave
++ // tasks queued concurrently that are unsynchronized with this thread as undefined behavior
++ if (this.queue.hasTasks()) {
++ successes = 0;
++ } else {
++ ++successes;
++ }
++
++ } while (successes != 2);
++
++ }
++
++ /**
++ * Closes this queue executor's queue and optionally waits for it to empty.
++ *
++ * If wait is {@code true}, then the queue will be empty by the time this call completes.
++ *
++ *
++ * This function is MT-Safe.
++ *
++ * @param wait If this call is to wait until the queue is empty
++ * @param killQueue Whether to shutdown this thread's queue
++ * @return whether this thread shut down the queue
++ */
++ public boolean close(final boolean wait, final boolean killQueue) {
++ boolean ret = !killQueue ? false : this.queue.shutdown();
++ this.closed = true;
++
++ // force thread to respond to the shutdown
++ this.parked.set(false);
++ LockSupport.unpark(this);
++
++ if (wait) {
++ this.flush();
++ }
++ return ret;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..dd501e83d991e45598509134fab05bafc1904953
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
+@@ -0,0 +1,1128 @@
++package io.papermc.paper.chunk;
++
++import com.destroystokyo.paper.util.misc.PlayerAreaMap;
++import com.destroystokyo.paper.util.misc.PooledLinkedHashSets;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.IntervalledCounter;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
++import it.unimi.dsi.fastutil.objects.Reference2IntOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket;
++import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket;
++import net.minecraft.server.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.*;
++import net.minecraft.util.Mth;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.LevelChunk;
++import org.apache.commons.lang3.mutable.MutableObject;
++import org.bukkit.craftbukkit.entity.CraftPlayer;
++import org.bukkit.entity.Player;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.TreeSet;
++import java.util.concurrent.atomic.AtomicInteger;
++
++public final class PlayerChunkLoader {
++
++ public static final int MIN_VIEW_DISTANCE = 2;
++ public static final int MAX_VIEW_DISTANCE = 32;
++
++ public static final int TICK_TICKET_LEVEL = 31;
++ public static final int LOADED_TICKET_LEVEL = 33;
++
++ public static int getTickViewDistance(final Player player) {
++ return getTickViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getTickViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getTargetTickViewDistance();
++ }
++ return data.getTargetTickViewDistance();
++ }
++
++ public static int getLoadViewDistance(final Player player) {
++ return getLoadViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getLoadViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getLoadDistance();
++ }
++ return data.getLoadDistance();
++ }
++
++ public static int getSendViewDistance(final Player player) {
++ return getSendViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getSendViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
++ if (data == null) {
++ return level.chunkSource.chunkMap.playerChunkManager.getTargetSendDistance();
++ }
++ return data.getTargetSendViewDistance();
++ }
++
++ protected final ChunkMap chunkMap;
++ protected final Reference2ObjectLinkedOpenHashMap playerMap = new Reference2ObjectLinkedOpenHashMap<>(512, 0.7f);
++ protected final ReferenceLinkedOpenHashSet chunkSendQueue = new ReferenceLinkedOpenHashSet<>(512, 0.7f);
++
++ protected final TreeSet chunkLoadQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
++ if (p1 == p2) {
++ return 0;
++ }
++
++ final ChunkPriorityHolder holder1 = p1.loadQueue.peekFirst();
++ final ChunkPriorityHolder holder2 = p2.loadQueue.peekFirst();
++
++ final int priorityCompare = Double.compare(holder1 == null ? Double.MAX_VALUE : holder1.priority, holder2 == null ? Double.MAX_VALUE : holder2.priority);
++
++ final int lastLoadTimeCompare = Long.compare(p1.lastChunkLoad, p2.lastChunkLoad);
++
++ if ((holder1 == null || holder2 == null || lastLoadTimeCompare == 0 || holder1.priority < 0.0 || holder2.priority < 0.0) && priorityCompare != 0) {
++ return priorityCompare;
++ }
++
++ if (lastLoadTimeCompare != 0) {
++ return lastLoadTimeCompare;
++ }
++
++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
++
++ if (idCompare != 0) {
++ return idCompare;
++ }
++
++ // last resort
++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
++ });
++
++ protected final TreeSet chunkSendWaitQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
++ if (p1 == p2) {
++ return 0;
++ }
++
++ final int timeCompare = Long.compare(p1.nextChunkSendTarget, p2.nextChunkSendTarget);
++ if (timeCompare != 0) {
++ return timeCompare;
++ }
++
++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
++
++ if (idCompare != 0) {
++ return idCompare;
++ }
++
++ // last resort
++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
++ });
++
++
++ // no throttling is applied below this VD for loading
++
++ /**
++ * The chunks to be sent to players, provided they're send-ready. Send-ready means the chunk and its 1 radius neighbours are loaded.
++ */
++ public final PlayerAreaMap broadcastMap;
++
++ /**
++ * The chunks to be brought up to send-ready status. Send-ready means the chunk and its 1 radius neighbours are loaded.
++ */
++ public final PlayerAreaMap loadMap;
++
++ /**
++ * Areamap used only to remove tickets for send-ready chunks. View distance is always + 1 of load view distance. Thus,
++ * this map is always representing the chunks we are actually going to load.
++ */
++ public final PlayerAreaMap loadTicketCleanup;
++
++ /**
++ * The chunks to be brought to ticking level. Each chunk must have 2 radius neighbours loaded before this can happen.
++ */
++ public final PlayerAreaMap tickMap;
++
++ /**
++ * -1 if defaulting to [load distance], else always in [2, load distance]
++ */
++ protected int rawSendDistance = -1;
++
++ /**
++ * -1 if defaulting to [tick view distance + 1], else always in [tick view distance + 1, 32 + 1]
++ */
++ protected int rawLoadDistance = -1;
++
++ /**
++ * Never -1, always in [2, 32]
++ */
++ protected int rawTickDistance = -1;
++
++ // methods to bridge for API
++
++ public int getTargetTickViewDistance() {
++ return this.getTickDistance();
++ }
++
++ public void setTargetTickViewDistance(final int distance) {
++ this.setTickDistance(distance);
++ }
++
++ public int getTargetNoTickViewDistance() {
++ return this.getLoadDistance() - 1;
++ }
++
++ public void setTargetNoTickViewDistance(final int distance) {
++ this.setLoadDistance(distance == -1 ? -1 : distance + 1);
++ }
++
++ public int getTargetSendDistance() {
++ return this.rawSendDistance == -1 ? this.getLoadDistance() : this.rawSendDistance;
++ }
++
++ public void setTargetSendDistance(final int distance) {
++ this.setSendDistance(distance);
++ }
++
++ // internal methods
++
++ public int getSendDistance() {
++ final int loadDistance = this.getLoadDistance();
++ return this.rawSendDistance == -1 ? loadDistance : Math.min(this.rawSendDistance, loadDistance);
++ }
++
++ public void setSendDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
++ }
++ this.rawSendDistance = distance;
++ }
++
++ public int getLoadDistance() {
++ final int tickDistance = this.getTickDistance();
++ return this.rawLoadDistance == -1 ? tickDistance + 1 : Math.max(tickDistance + 1, this.rawLoadDistance);
++ }
++
++ public void setLoadDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Load distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
++ }
++ this.rawLoadDistance = distance;
++ }
++
++ public int getTickDistance() {
++ return this.rawTickDistance;
++ }
++
++ public void setTickDistance(final int distance) {
++ if (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE) {
++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + ", got: " + distance);
++ }
++ this.rawTickDistance = distance;
++ }
++
++ /*
++ Players have 3 different types of view distance:
++ 1. Sending view distance
++ 2. Loading view distance
++ 3. Ticking view distance
++
++ But for configuration purposes (and API) there are:
++ 1. No-tick view distance
++ 2. Tick view distance
++ 3. Broadcast view distance
++
++ These aren't always the same as the types we represent internally.
++
++ Loading view distance is always max(no-tick + 1, tick + 1)
++ - no-tick has 1 added because clients need an extra radius to render chunks
++ - tick has 1 added because it needs an extra radius of chunks to load before they can be marked ticking
++
++ Loading view distance is defined as the radius of chunks that will be brought to send-ready status, which means
++ it loads chunks in radius load-view-distance + 1.
++
++ The maximum value for send view distance is the load view distance. API can set it lower.
++ */
++
++ public PlayerChunkLoader(final ChunkMap chunkMap, final PooledLinkedHashSets pooledHashSets) {
++ this.chunkMap = chunkMap;
++ this.broadcastMap = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ PlayerChunkLoader.this.onChunkLeave(player, rangeX, rangeZ);
++ });
++ this.loadMap = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ PlayerChunkLoader.this.isTargetedForPlayerLoad.remove(CoordinateUtils.getChunkKey(rangeX, rangeZ));
++ });
++ this.loadTicketCleanup = new PlayerAreaMap(pooledHashSets,
++ null,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
++ if (PlayerChunkLoader.this.chunkTicketTracker.remove(chunkPos.toLong())) {
++ --PlayerChunkLoader.this.concurrentChunkLoads;
++ }
++ });
++ this.tickMap = new PlayerAreaMap(pooledHashSets,
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState.size() != 1) {
++ return;
++ }
++ LevelChunk chunk = PlayerChunkLoader.this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(rangeX, rangeZ);
++ if (chunk == null || !chunk.areNeighboursLoaded(2)) {
++ return;
++ }
++
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ },
++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> {
++ if (newState != null) {
++ return;
++ }
++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ });
++ }
++
++ protected final LongOpenHashSet isTargetedForPlayerLoad = new LongOpenHashSet();
++ protected final LongOpenHashSet chunkTicketTracker = new LongOpenHashSet();
++
++ public boolean isChunkNearPlayers(final int chunkX, final int chunkZ) {
++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
++
++ return playersInSendRange != null;
++ }
++
++ public void onChunkPostProcessing(final int chunkX, final int chunkZ) {
++ this.onChunkSendReady(chunkX, chunkZ);
++ }
++
++ private boolean chunkNeedsPostProcessing(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (chunk == null) {
++ return false;
++ }
++
++ final LevelChunk levelChunk = chunk.getSendingChunk();
++
++ return levelChunk != null && !levelChunk.isPostProcessingDone;
++ }
++
++ // rets whether the chunk is at a loaded stage that is ready to be sent to players
++ public boolean isChunkPlayerLoaded(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (chunk == null) {
++ return false;
++ }
++
++ final LevelChunk levelChunk = chunk.getSendingChunk();
++
++ return levelChunk != null && levelChunk.isPostProcessingDone && this.isTargetedForPlayerLoad.contains(key);
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) {
++ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ);
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++ if (data == null) {
++ return false;
++ }
++
++ return data.hasSentChunk(chunkX, chunkZ);
++ }
++
++ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++ if (data == null) {
++ return false;
++ }
++
++ final boolean center = data.hasSentChunk(chunkX, chunkZ);
++ if (!center) {
++ return false;
++ }
++
++ return !(data.hasSentChunk(chunkX - 1, chunkZ) && data.hasSentChunk(chunkX + 1, chunkZ) &&
++ data.hasSentChunk(chunkX, chunkZ - 1) && data.hasSentChunk(chunkX, chunkZ + 1));
++ }
++
++ protected int getMaxConcurrentChunkSends() {
++ return GlobalConfiguration.get().chunkLoading.maxConcurrentSends;
++ }
++
++ protected int getMaxChunkLoads() {
++ double config = GlobalConfiguration.get().chunkLoading.playerMaxConcurrentLoads;
++ double max = GlobalConfiguration.get().chunkLoading.globalMaxConcurrentLoads;
++ return (int)Math.ceil(Math.min(config * MinecraftServer.getServer().getPlayerCount(), max <= 1.0 ? Double.MAX_VALUE : max));
++ }
++
++ protected long getTargetSendPerPlayerAddend() {
++ return GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate);
++ }
++
++ protected long getMaxSendAddend() {
++ return GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate);
++ }
++
++ public void onChunkPlayerTickReady(final int chunkX, final int chunkZ) {
++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
++ }
++
++ public void onChunkSendReady(final int chunkX, final int chunkZ) {
++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
++
++ if (playersInSendRange == null) {
++ return;
++ }
++
++ final Object[] rawData = playersInSendRange.getBackingSet();
++ for (int i = 0, len = rawData.length; i < len; ++i) {
++ final Object raw = rawData[i];
++
++ if (!(raw instanceof ServerPlayer)) {
++ continue;
++ }
++ this.onChunkSendReady((ServerPlayer)raw, chunkX, chunkZ);
++ }
++ }
++
++ public void onChunkSendReady(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++
++ if (data == null) {
++ return;
++ }
++
++ if (data.hasSentChunk(chunkX, chunkZ) || !this.isChunkPlayerLoaded(chunkX, chunkZ)) {
++ // if we don't have player tickets, then the load logic will pick this up and queue to send
++ return;
++ }
++
++ if (!data.chunksToBeSent.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ // don't queue to send, we don't want the chunk
++ return;
++ }
++
++ final long playerPos = this.broadcastMap.getLastCoordinate(player);
++ final int playerChunkX = CoordinateUtils.getChunkX(playerPos);
++ final int playerChunkZ = CoordinateUtils.getChunkZ(playerPos);
++ final int manhattanDistance = Math.abs(playerChunkX - chunkX) + Math.abs(playerChunkZ - chunkZ);
++
++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, 0.0);
++ data.sendQueue.add(holder);
++ }
++
++ public void onChunkLoad(final int chunkX, final int chunkZ) {
++ if (this.chunkTicketTracker.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ --this.concurrentChunkLoads;
++ }
++ }
++
++ public void onChunkLeave(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerLoaderData data = this.playerMap.get(player);
++
++ if (data == null) {
++ return;
++ }
++
++ data.unloadChunk(chunkX, chunkZ);
++ }
++
++ public void addPlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot add player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++ final PlayerLoaderData data = new PlayerLoaderData(player, this);
++ if (this.playerMap.putIfAbsent(player, data) == null) {
++ data.update();
++ }
++ }
++
++ public void removePlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot remove player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++
++ final PlayerLoaderData loaderData = this.playerMap.remove(player);
++ if (loaderData == null) {
++ return;
++ }
++ loaderData.remove();
++ this.chunkLoadQueue.remove(loaderData);
++ this.chunkSendQueue.remove(loaderData);
++ this.chunkSendWaitQueue.remove(loaderData);
++ synchronized (this.sendingChunkCounts) {
++ final int count = this.sendingChunkCounts.removeInt(loaderData);
++ if (count != 0) {
++ concurrentChunkSends.getAndAdd(-count);
++ }
++ }
++ }
++
++ public void updatePlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread("Cannot update player async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++ final PlayerLoaderData loaderData = this.playerMap.get(player);
++ if (loaderData != null) {
++ loaderData.update();
++ }
++ }
++
++ public PlayerLoaderData getData(final ServerPlayer player) {
++ return this.playerMap.get(player);
++ }
++
++ public void tick() {
++ TickThread.ensureTickThread("Cannot tick async");
++ for (final PlayerLoaderData data : this.playerMap.values()) {
++ data.update();
++ }
++ this.tickMidTick();
++ }
++
++ protected static final AtomicInteger concurrentChunkSends = new AtomicInteger();
++ protected final Reference2IntOpenHashMap sendingChunkCounts = new Reference2IntOpenHashMap<>();
++ private static long nextChunkSend;
++ private void trySendChunks() {
++ final long time = System.nanoTime();
++ if (time < nextChunkSend) {
++ return;
++ }
++ // drain entries from wait queue
++ while (!this.chunkSendWaitQueue.isEmpty()) {
++ final PlayerLoaderData data = this.chunkSendWaitQueue.first();
++
++ if (data.nextChunkSendTarget > time) {
++ break;
++ }
++
++ this.chunkSendWaitQueue.pollFirst();
++
++ this.chunkSendQueue.add(data);
++ }
++
++ if (this.chunkSendQueue.isEmpty()) {
++ return;
++ }
++
++ final int maxSends = this.getMaxConcurrentChunkSends();
++ final long nextPlayerDeadline = this.getTargetSendPerPlayerAddend() + time;
++ for (;;) {
++ if (this.chunkSendQueue.isEmpty()) {
++ break;
++ }
++ final int currSends = concurrentChunkSends.get();
++ if (currSends >= maxSends) {
++ break;
++ }
++
++ if (!concurrentChunkSends.compareAndSet(currSends, currSends + 1)) {
++ continue;
++ }
++
++ // send chunk
++
++ final PlayerLoaderData data = this.chunkSendQueue.removeFirst();
++
++ final ChunkPriorityHolder queuedSend = data.sendQueue.pollFirst();
++ if (queuedSend == null) {
++ concurrentChunkSends.getAndDecrement(); // we never sent, so decrease
++ // stop iterating over players who have nothing to send
++ if (this.chunkSendQueue.isEmpty()) {
++ // nothing left
++ break;
++ }
++ continue;
++ }
++
++ if (!this.isChunkPlayerLoaded(queuedSend.chunkX, queuedSend.chunkZ)) {
++ throw new IllegalStateException();
++ }
++
++ data.nextChunkSendTarget = nextPlayerDeadline;
++ this.chunkSendWaitQueue.add(data);
++
++ synchronized (this.sendingChunkCounts) {
++ this.sendingChunkCounts.addTo(data, 1);
++ }
++
++ data.sendChunk(queuedSend.chunkX, queuedSend.chunkZ, () -> {
++ synchronized (this.sendingChunkCounts) {
++ final int count = this.sendingChunkCounts.getInt(data);
++ if (count == 0) {
++ // disconnected, so we don't need to decrement: it will be decremented for us
++ return;
++ }
++ if (count == 1) {
++ this.sendingChunkCounts.removeInt(data);
++ } else {
++ this.sendingChunkCounts.put(data, count - 1);
++ }
++ }
++
++ concurrentChunkSends.getAndDecrement();
++ });
++
++ nextChunkSend = this.getMaxSendAddend() + time;
++ if (time < nextChunkSend) {
++ break;
++ }
++ }
++ }
++
++ protected int concurrentChunkLoads;
++ // this interval prevents bursting a lot of chunk loads
++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_SHORT = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
++ // this interval ensures the rate is kept between ticks correctly
++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_LONG = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
++ private void tryLoadChunks() {
++ if (this.chunkLoadQueue.isEmpty()) {
++ return;
++ }
++
++ final int maxLoads = this.getMaxChunkLoads();
++ final long time = System.nanoTime();
++ boolean updatedCounters = false;
++ for (;;) {
++ final PlayerLoaderData data = this.chunkLoadQueue.pollFirst();
++
++ data.lastChunkLoad = time;
++
++ final ChunkPriorityHolder queuedLoad = data.loadQueue.peekFirst();
++ if (queuedLoad == null) {
++ if (this.chunkLoadQueue.isEmpty()) {
++ break;
++ }
++ continue;
++ }
++
++ if (!updatedCounters) {
++ updatedCounters = true;
++ TICKET_ADDITION_COUNTER_SHORT.updateCurrentTime(time);
++ TICKET_ADDITION_COUNTER_LONG.updateCurrentTime(time);
++ data.ticketAdditionCounterShort.updateCurrentTime(time);
++ data.ticketAdditionCounterLong.updateCurrentTime(time);
++ }
++
++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // already loaded!
++ data.loadQueue.pollFirst(); // already loaded so we just skip
++ this.chunkLoadQueue.add(data);
++
++ // ensure the chunk is queued to send
++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
++ continue;
++ }
++
++ final long chunkKey = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
++
++ final double priority = queuedLoad.priority;
++ // while we do need to rate limit chunk loads, the logic for sending chunks requires that tickets are present.
++ // when chunks are loaded (i.e spawn) but do not have this player's tickets, they have to wait behind the
++ // load queue. To avoid this problem, we check early here if tickets are required to load the chunk - if they
++ // aren't required, it bypasses the limiter system.
++ boolean unloadedTargetChunk = false;
++ unloaded_check:
++ for (int dz = -1; dz <= 1; ++dz) {
++ for (int dx = -1; dx <= 1; ++dx) {
++ final int offX = queuedLoad.chunkX + dx;
++ final int offZ = queuedLoad.chunkZ + dz;
++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) == null) {
++ unloadedTargetChunk = true;
++ break unloaded_check;
++ }
++ }
++ }
++ if (unloadedTargetChunk && priority >= 0.0) {
++ // priority >= 0.0 implies rate limited chunks
++
++ final int currentChunkLoads = this.concurrentChunkLoads;
++ if (currentChunkLoads >= maxLoads || (GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate > 0 && (TICKET_ADDITION_COUNTER_SHORT.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate || TICKET_ADDITION_COUNTER_LONG.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate))
++ || (GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate > 0.0 && (data.ticketAdditionCounterShort.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate || data.ticketAdditionCounterLong.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate))) {
++ // don't poll, we didn't load it
++ this.chunkLoadQueue.add(data);
++ break;
++ }
++ }
++
++ // can only poll after we decide to load
++ data.loadQueue.pollFirst();
++
++ // now that we've polled we can re-add to load queue
++ this.chunkLoadQueue.add(data);
++
++ // add necessary tickets to load chunk up to send-ready
++ for (int dz = -1; dz <= 1; ++dz) {
++ for (int dx = -1; dx <= 1; ++dx) {
++ final int offX = queuedLoad.chunkX + dx;
++ final int offZ = queuedLoad.chunkZ + dz;
++ final ChunkPos chunkPos = new ChunkPos(offX, offZ);
++
++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) != null) {
++ continue;
++ }
++
++ if (priority > 0.0 && this.chunkTicketTracker.add(CoordinateUtils.getChunkKey(offX, offZ))) {
++ // won't reach here if unloadedTargetChunk is false
++ ++this.concurrentChunkLoads;
++ TICKET_ADDITION_COUNTER_SHORT.addTime(time);
++ TICKET_ADDITION_COUNTER_LONG.addTime(time);
++ data.ticketAdditionCounterShort.addTime(time);
++ data.ticketAdditionCounterLong.addTime(time);
++ }
++ }
++ }
++
++ // mark that we've added tickets here
++ this.isTargetedForPlayerLoad.add(chunkKey);
++
++ // it's possible all we needed was the player tickets to queue up the send.
++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // yup, all we needed.
++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
++ } else if (this.chunkNeedsPostProcessing(queuedLoad.chunkX, queuedLoad.chunkZ)) {
++ // requires post processing
++ this.chunkMap.mainThreadExecutor.execute(() -> {
++ final long key = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
++ final ChunkHolder holder = PlayerChunkLoader.this.chunkMap.getVisibleChunkIfPresent(key);
++
++ if (holder == null) {
++ return;
++ }
++
++ final LevelChunk chunk = holder.getSendingChunk();
++
++ if (chunk != null && !chunk.isPostProcessingDone) {
++ chunk.postProcessGeneration();
++ }
++ });
++ }
++ }
++ }
++
++ public void tickMidTick() {
++ // try to send more chunks
++ this.trySendChunks();
++
++ // try to queue more chunks to load
++ this.tryLoadChunks();
++ }
++
++ static final class ChunkPriorityHolder {
++ public final int chunkX;
++ public final int chunkZ;
++ public final int manhattanDistanceToPlayer;
++ public final double priority;
++
++ public ChunkPriorityHolder(final int chunkX, final int chunkZ, final int manhattanDistanceToPlayer, final double priority) {
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.manhattanDistanceToPlayer = manhattanDistanceToPlayer;
++ this.priority = priority;
++ }
++ }
++
++ public static final class PlayerLoaderData {
++
++ protected static final float FOV = 110.0f;
++ protected static final double PRIORITISED_DISTANCE = 12.0 * 16.0;
++
++ // Player max sprint speed is approximately 8m/s
++ protected static final double LOOK_PRIORITY_SPEED_THRESHOLD = (10.0/20.0) * (10.0/20.0);
++ protected static final double LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD = 3.0f;
++
++ protected double lastLocX = Double.NEGATIVE_INFINITY;
++ protected double lastLocZ = Double.NEGATIVE_INFINITY;
++
++ protected int lastChunkX = Integer.MIN_VALUE;
++ protected int lastChunkZ = Integer.MIN_VALUE;
++
++ // this is corrected so that 0 is along the positive x-axis
++ protected float lastYaw = Float.NEGATIVE_INFINITY;
++
++ protected int lastSendDistance = Integer.MIN_VALUE;
++ protected int lastLoadDistance = Integer.MIN_VALUE;
++ protected int lastTickDistance = Integer.MIN_VALUE;
++ protected boolean usingLookingPriority;
++
++ protected final ServerPlayer player;
++ protected final PlayerChunkLoader loader;
++
++ // warning: modifications of this field must be aware that the loadQueue inside PlayerChunkLoader uses this field
++ // in a comparator!
++ protected final ArrayDeque loadQueue = new ArrayDeque<>();
++ protected final LongOpenHashSet sentChunks = new LongOpenHashSet();
++ protected final LongOpenHashSet chunksToBeSent = new LongOpenHashSet();
++
++ protected final TreeSet sendQueue = new TreeSet<>((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
++ final int distanceCompare = Integer.compare(p1.manhattanDistanceToPlayer, p2.manhattanDistanceToPlayer);
++ if (distanceCompare != 0) {
++ return distanceCompare;
++ }
++
++ final int coordinateXCompare = Integer.compare(p1.chunkX, p2.chunkX);
++ if (coordinateXCompare != 0) {
++ return coordinateXCompare;
++ }
++
++ return Integer.compare(p1.chunkZ, p2.chunkZ);
++ });
++
++ protected int sendViewDistance = -1;
++ protected int loadViewDistance = -1;
++ protected int tickViewDistance = -1;
++
++ protected long nextChunkSendTarget;
++
++ // this interval prevents bursting a lot of chunk loads
++ protected final IntervalledCounter ticketAdditionCounterShort = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
++ // this ensures the rate is kept between ticks correctly
++ protected final IntervalledCounter ticketAdditionCounterLong = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
++
++ public long lastChunkLoad;
++
++ public PlayerLoaderData(final ServerPlayer player, final PlayerChunkLoader loader) {
++ this.player = player;
++ this.loader = loader;
++ }
++
++ // these view distance methods are for api
++ public int getTargetSendViewDistance() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ final int clientViewDistance = this.getClientViewDistance();
++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
++ return sendViewDistance;
++ }
++
++ public void setTargetSendViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send view distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.sendViewDistance = distance;
++ }
++
++ public int getTargetNoTickViewDistance() {
++ return (this.loadViewDistance == -1 ? this.getLoadDistance() : this.loadViewDistance) - 1;
++ }
++
++ public void setTargetNoTickViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("Simulation distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
++ }
++ this.loadViewDistance = distance == -1 ? -1 : distance + 1;
++ }
++
++ public int getTargetTickViewDistance() {
++ return this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ }
++
++ public void setTargetTickViewDistance(final int distance) {
++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
++ }
++ this.tickViewDistance = distance;
++ }
++
++ protected int getLoadDistance() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++
++ return Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ }
++
++ public boolean hasSentChunk(final int chunkX, final int chunkZ) {
++ return this.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ public void sendChunk(final int chunkX, final int chunkZ, final Runnable onChunkSend) {
++ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded
++ this.player.connection.connection.execute(onChunkSend);
++ } else {
++ throw new IllegalStateException();
++ }
++ }
++
++ public void unloadChunk(final int chunkX, final int chunkZ) {
++ if (this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded
++ }
++ }
++
++ protected static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ,
++ final int sendRadius) {
++ // expect sendRadius to be = 1 + target viewable radius
++ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius);
++ }
++
++ protected static boolean triangleIntersects(final double p1x, final double p1z, // triangle point
++ final double p2x, final double p2z, // triangle point
++ final double p3x, final double p3z, // triangle point
++
++ final double targetX, final double targetZ) { // point
++ // from barycentric coordinates:
++ // targetX = a*p1x + b*p2x + c*p3x
++ // targetZ = a*p1z + b*p2z + c*p3z
++ // 1.0 = a*1.0 + b*1.0 + c*1.0
++ // where a, b, c >= 0.0
++ // so, if any of a, b, c are less-than zero then there is no intersection.
++
++ // d = ((p2z - p3z)(p1x - p3x) + (p3x - p2x)(p1z - p3z))
++ // a = ((p2z - p3z)(targetX - p3x) + (p3x - p2x)(targetZ - p3z)) / d
++ // b = ((p3z - p1z)(targetX - p3x) + (p1x - p3x)(targetZ - p3z)) / d
++ // c = 1.0 - a - b
++
++ final double d = (p2z - p3z)*(p1x - p3x) + (p3x - p2x)*(p1z - p3z);
++ final double a = ((p2z - p3z)*(targetX - p3x) + (p3x - p2x)*(targetZ - p3z)) / d;
++
++ if (a < 0.0 || a > 1.0) {
++ return false;
++ }
++
++ final double b = ((p3z - p1z)*(targetX - p3x) + (p1x - p3x)*(targetZ - p3z)) / d;
++ if (b < 0.0 || b > 1.0) {
++ return false;
++ }
++
++ final double c = 1.0 - a - b;
++
++ return c >= 0.0 && c <= 1.0;
++ }
++
++ public void remove() {
++ this.loader.broadcastMap.remove(this.player);
++ this.loader.loadMap.remove(this.player);
++ this.loader.loadTicketCleanup.remove(this.player);
++ this.loader.tickMap.remove(this.player);
++ }
++
++ protected int getClientViewDistance() {
++ return this.player.clientViewDistance == null ? -1 : Math.max(0, this.player.clientViewDistance.intValue());
++ }
++
++ public void update() {
++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
++ // load view cannot be less-than tick view + 1
++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
++ // send view cannot be greater-than load view
++ final int clientViewDistance = this.getClientViewDistance();
++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
++
++ final double posX = this.player.getX();
++ final double posZ = this.player.getZ();
++ final float yaw = MCUtil.normalizeYaw(this.player.getYRot() + 90.0f); // mc yaw 0 is along the positive z axis, but obviously this is really dumb - offset so we are at positive x-axis
++
++ // in general, we really only want to prioritise chunks in front if we know we're moving pretty fast into them.
++ final boolean useLookPriority = GlobalConfiguration.get().chunkLoading.enableFrustumPriority && (this.player.getDeltaMovement().horizontalDistanceSqr() > LOOK_PRIORITY_SPEED_THRESHOLD ||
++ this.player.getAbilities().flying);
++
++ // make sure we're in the send queue
++ this.loader.chunkSendWaitQueue.add(this);
++
++ if (
++ // has view distance stayed the same?
++ sendViewDistance == this.lastSendDistance
++ && loadViewDistance == this.lastLoadDistance
++ && tickViewDistance == this.lastTickDistance
++
++ && (this.usingLookingPriority ? (
++ // has our block stayed the same (this also accounts for chunk change)?
++ Mth.floor(this.lastLocX) == Mth.floor(posX)
++ && Mth.floor(this.lastLocZ) == Mth.floor(posZ)
++ ) : (
++ // has our chunk stayed the same
++ (Mth.floor(this.lastLocX) >> 4) == (Mth.floor(posX) >> 4)
++ && (Mth.floor(this.lastLocZ) >> 4) == (Mth.floor(posZ) >> 4)
++ ))
++
++ // has our decision about look priority changed?
++ && this.usingLookingPriority == useLookPriority
++
++ // if we are currently using look priority, has our yaw stayed within recalc threshold?
++ && (!this.usingLookingPriority || Math.abs(yaw - this.lastYaw) <= LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD)
++ ) {
++ // nothing we care about changed, so we're not re-calculating
++ return;
++ }
++
++ final int centerChunkX = Mth.floor(posX) >> 4;
++ final int centerChunkZ = Mth.floor(posZ) >> 4;
++
++ final boolean needsChunkCenterUpdate = (centerChunkX != this.lastChunkX) || (centerChunkZ != this.lastChunkZ);
++ this.loader.broadcastMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, sendViewDistance);
++ this.loader.loadMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance);
++ this.loader.loadTicketCleanup.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance + 1);
++ this.loader.tickMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, tickViewDistance);
++
++ if (sendViewDistance != this.lastSendDistance) {
++ // update the view radius for client
++ // note that this should be after the map calls because the client wont expect unload calls not in its VD
++ // and it's possible we decreased VD here
++ this.player.connection.send(new ClientboundSetChunkCacheRadiusPacket(sendViewDistance));
++ }
++ if (tickViewDistance != this.lastTickDistance) {
++ this.player.connection.send(new ClientboundSetSimulationDistancePacket(tickViewDistance));
++ }
++
++ this.lastLocX = posX;
++ this.lastLocZ = posZ;
++ this.lastYaw = yaw;
++ this.lastSendDistance = sendViewDistance;
++ this.lastLoadDistance = loadViewDistance;
++ this.lastTickDistance = tickViewDistance;
++ this.usingLookingPriority = useLookPriority;
++
++ this.lastChunkX = centerChunkX;
++ this.lastChunkZ = centerChunkZ;
++
++ // points for player "view" triangle:
++
++ // obviously, the player pos is a vertex
++ final double p1x = posX;
++ final double p1z = posZ;
++
++ // to the left of the looking direction
++ final double p2x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
++ + p1x; // offset vector
++ final double p2z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
++ + p1z; // offset vector
++
++ // to the right of the looking direction
++ final double p3x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
++ + p1x; // offset vector
++ final double p3z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
++ + p1z; // offset vector
++
++ // now that we have all of our points, we can recalculate the load queue
++
++ final List loadQueue = new ArrayList<>();
++
++ // clear send queue, we are re-sorting
++ this.sendQueue.clear();
++ // clear chunk want set, vd/position might have changed
++ this.chunksToBeSent.clear();
++
++ final int searchViewDistance = Math.max(loadViewDistance, sendViewDistance);
++
++ for (int dx = -searchViewDistance; dx <= searchViewDistance; ++dx) {
++ for (int dz = -searchViewDistance; dz <= searchViewDistance; ++dz) {
++ final int chunkX = dx + centerChunkX;
++ final int chunkZ = dz + centerChunkZ;
++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz));
++ final boolean sendChunk = squareDistance <= sendViewDistance && wantChunkLoaded(centerChunkX, centerChunkZ, chunkX, chunkZ, sendViewDistance);
++
++ if (this.hasSentChunk(chunkX, chunkZ)) {
++ // already sent (which means it is also loaded)
++ if (!sendChunk) {
++ // have sent the chunk, but don't want it anymore
++ // unload it now
++ this.unloadChunk(chunkX, chunkZ);
++ }
++ continue;
++ }
++
++ final boolean loadChunk = squareDistance <= loadViewDistance;
++
++ final boolean prioritised = useLookPriority && triangleIntersects(
++ // prioritisation triangle
++ p1x, p1z, p2x, p2z, p3x, p3z,
++
++ // center of chunk
++ (double)((chunkX << 4) | 8), (double)((chunkZ << 4) | 8)
++ );
++
++ final int manhattanDistance = Math.abs(dx) + Math.abs(dz);
++
++ final double priority;
++
++ if (squareDistance <= GlobalConfiguration.get().chunkLoading.minLoadRadius) {
++ // priority should be negative, and we also want to order it from center outwards
++ // so we want (0,0) to be the smallest, and (minLoadedRadius,minLoadedRadius) to be the greatest
++ priority = -((2 * GlobalConfiguration.get().chunkLoading.minLoadRadius + 1) - manhattanDistance);
++ } else {
++ if (prioritised) {
++ // we don't prioritise these chunks above others because we also want to make sure some chunks
++ // will be loaded if the player changes direction
++ priority = (double)manhattanDistance / 6.0;
++ } else {
++ priority = (double)manhattanDistance;
++ }
++ }
++
++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, priority);
++
++ if (!this.loader.isChunkPlayerLoaded(chunkX, chunkZ)) {
++ if (loadChunk) {
++ loadQueue.add(holder);
++ if (sendChunk) {
++ this.chunksToBeSent.add(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ }
++ } else {
++ // loaded but not sent: so queue it!
++ if (sendChunk) {
++ this.sendQueue.add(holder);
++ }
++ }
++ }
++ }
++
++ loadQueue.sort((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
++ return Double.compare(p1.priority, p2.priority);
++ });
++
++ // we're modifying loadQueue, must remove
++ this.loader.chunkLoadQueue.remove(this);
++
++ this.loadQueue.clear();
++ this.loadQueue.addAll(loadQueue);
++
++ // must re-add
++ this.loader.chunkLoadQueue.add(this);
++
++ // update the chunk center
++ // this must be done last so that the client does not ignore any of our unload chunk packets
++ if (needsChunkCenterUpdate) {
++ this.player.connection.send(new ClientboundSetChunkCacheCenterPacket(centerChunkX, centerChunkZ));
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..969f43ffc2e28aac45d1145d35ab37c8740b0880
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+@@ -0,0 +1,839 @@
++package io.papermc.paper.chunk.system.entity;
++
++import com.destroystokyo.paper.util.maplist.EntityList;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.ints.Int2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Object2ReferenceOpenHashMap;
++import net.minecraft.core.BlockPos;
++import net.minecraft.server.ChunkSystem;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.util.Mth;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.entity.EntityType;
++import net.minecraft.world.level.entity.EntityInLevelCallback;
++import net.minecraft.world.level.entity.EntityTypeTest;
++import net.minecraft.world.level.entity.LevelCallback;
++import net.minecraft.world.level.entity.LevelEntityGetter;
++import net.minecraft.world.level.entity.Visibility;
++import net.minecraft.world.phys.AABB;
++import org.jetbrains.annotations.NotNull;
++import org.jetbrains.annotations.Nullable;
++import org.slf4j.Logger;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.NoSuchElementException;
++import java.util.UUID;
++import java.util.concurrent.locks.StampedLock;
++import java.util.function.Consumer;
++import java.util.function.Predicate;
++
++public final class EntityLookup implements LevelEntityGetter {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected static final int REGION_SHIFT = 5;
++ protected static final int REGION_MASK = (1 << REGION_SHIFT) - 1;
++ protected static final int REGION_SIZE = 1 << REGION_SHIFT;
++
++ public final ServerLevel world;
++
++ private final StampedLock stateLock = new StampedLock();
++ protected final Long2ObjectOpenHashMap regions = new Long2ObjectOpenHashMap<>(128, 0.5f);
++
++ private final int minSection; // inclusive
++ private final int maxSection; // inclusive
++ private final LevelCallback worldCallback;
++
++ private final StampedLock entityByLock = new StampedLock();
++ private final Int2ReferenceOpenHashMap entityById = new Int2ReferenceOpenHashMap<>();
++ private final Object2ReferenceOpenHashMap entityByUUID = new Object2ReferenceOpenHashMap<>();
++ private final EntityList accessibleEntities = new EntityList();
++
++ public EntityLookup(final ServerLevel world, final LevelCallback worldCallback) {
++ this.world = world;
++ this.minSection = WorldUtil.getMinSection(world);
++ this.maxSection = WorldUtil.getMaxSection(world);
++ this.worldCallback = worldCallback;
++ }
++
++ private static Entity maskNonAccessible(final Entity entity) {
++ if (entity == null) {
++ return null;
++ }
++ final Visibility visibility = EntityLookup.getEntityStatus(entity);
++ return visibility.isAccessible() ? entity : null;
++ }
++
++ @Nullable
++ @Override
++ public Entity get(final int id) {
++ final long attempt = this.entityByLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final Entity ret = this.entityById.get(id);
++
++ if (this.entityByLock.validate(attempt)) {
++ return maskNonAccessible(ret);
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.entityByLock.readLock();
++ try {
++ return maskNonAccessible(this.entityById.get(id));
++ } finally {
++ this.entityByLock.tryUnlockRead();
++ }
++ }
++
++ @Nullable
++ @Override
++ public Entity get(final UUID id) {
++ final long attempt = this.entityByLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final Entity ret = this.entityByUUID.get(id);
++
++ if (this.entityByLock.validate(attempt)) {
++ return maskNonAccessible(ret);
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.entityByLock.readLock();
++ try {
++ return maskNonAccessible(this.entityByUUID.get(id));
++ } finally {
++ this.entityByLock.tryUnlockRead();
++ }
++ }
++
++ public boolean hasEntity(final UUID uuid) {
++ return this.get(uuid) != null;
++ }
++
++ public String getDebugInfo() {
++ return "count_id:" + this.entityById.size() + ",count_uuid:" + this.entityByUUID.size() + ",region_count:" + this.regions.size();
++ }
++
++ static final class ArrayIterable implements Iterable {
++
++ private final T[] array;
++ private final int off;
++ private final int length;
++
++ public ArrayIterable(final T[] array, final int off, final int length) {
++ this.array = array;
++ this.off = off;
++ this.length = length;
++ if (length > array.length) {
++ throw new IllegalArgumentException("Length must be no greater-than the array length");
++ }
++ }
++
++ @NotNull
++ @Override
++ public Iterator iterator() {
++ return new ArrayIterator<>(this.array, this.off, this.length);
++ }
++
++ static final class ArrayIterator implements Iterator {
++
++ private final T[] array;
++ private int off;
++ private final int length;
++
++ public ArrayIterator(final T[] array, final int off, final int length) {
++ this.array = array;
++ this.off = off;
++ this.length = length;
++ }
++
++ @Override
++ public boolean hasNext() {
++ return this.off < this.length;
++ }
++
++ @Override
++ public T next() {
++ if (this.off >= this.length) {
++ throw new NoSuchElementException();
++ }
++ return this.array[this.off++];
++ }
++
++ @Override
++ public void remove() {
++ throw new UnsupportedOperationException();
++ }
++ }
++ }
++
++ @Override
++ public Iterable getAll() {
++ return new ArrayIterable<>(this.accessibleEntities.getRawData(), 0, this.accessibleEntities.size());
++ }
++
++ @Override
++ public void get(final EntityTypeTest filter, final Consumer action) {
++ for (final Entity entity : this.entityById.values()) {
++ final Visibility visibility = EntityLookup.getEntityStatus(entity);
++ if (!visibility.isAccessible()) {
++ continue;
++ }
++ final U casted = filter.tryCast(entity);
++ if (casted != null) {
++ action.accept(casted);
++ }
++ }
++ }
++
++ @Override
++ public void get(final AABB box, final Consumer action) {
++ List entities = new ArrayList<>();
++ this.getEntitiesWithoutDragonParts(null, box, entities, null);
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ action.accept(entities.get(i));
++ }
++ }
++
++ @Override
++ public void get(final EntityTypeTest filter, final AABB box, final Consumer action) {
++ List entities = new ArrayList<>();
++ this.getEntitiesWithoutDragonParts(null, box, entities, null);
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ final U casted = filter.tryCast(entities.get(i));
++ if (casted != null) {
++ action.accept(casted);
++ }
++ }
++ }
++
++ public void entityStatusChange(final Entity entity, final ChunkEntitySlices slices, final Visibility oldVisibility, final Visibility newVisibility, final boolean moved,
++ final boolean created, final boolean destroyed) {
++ TickThread.ensureTickThread(entity, "Entity status change must only happen on the main thread");
++
++ if (entity.updatingSectionStatus) {
++ // recursive status update
++ LOGGER.error("Cannot recursively update entity chunk status for entity " + entity, new Throwable());
++ return;
++ }
++
++ final boolean entityStatusUpdateBefore = slices == null ? false : slices.startPreventingStatusUpdates();
++
++ if (entityStatusUpdateBefore) {
++ LOGGER.error("Cannot update chunk status for entity " + entity + " since entity chunk (" + slices.chunkX + "," + slices.chunkZ + ") is receiving update", new Throwable());
++ return;
++ }
++
++ try {
++ final Boolean ticketBlockBefore = this.world.chunkTaskScheduler.chunkHolderManager.blockTicketUpdates();
++ try {
++ entity.updatingSectionStatus = true;
++ try {
++ if (created) {
++ EntityLookup.this.worldCallback.onCreated(entity);
++ }
++
++ if (oldVisibility == newVisibility) {
++ if (moved && newVisibility.isAccessible()) {
++ EntityLookup.this.worldCallback.onSectionChange(entity);
++ }
++ return;
++ }
++
++ if (newVisibility.ordinal() > oldVisibility.ordinal()) {
++ // status upgrade
++ if (!oldVisibility.isAccessible() && newVisibility.isAccessible()) {
++ this.accessibleEntities.add(entity);
++ EntityLookup.this.worldCallback.onTrackingStart(entity);
++ }
++
++ if (!oldVisibility.isTicking() && newVisibility.isTicking()) {
++ EntityLookup.this.worldCallback.onTickingStart(entity);
++ }
++ } else {
++ // status downgrade
++ if (oldVisibility.isTicking() && !newVisibility.isTicking()) {
++ EntityLookup.this.worldCallback.onTickingEnd(entity);
++ }
++
++ if (oldVisibility.isAccessible() && !newVisibility.isAccessible()) {
++ this.accessibleEntities.remove(entity);
++ EntityLookup.this.worldCallback.onTrackingEnd(entity);
++ }
++ }
++
++ if (moved && newVisibility.isAccessible()) {
++ EntityLookup.this.worldCallback.onSectionChange(entity);
++ }
++
++ if (destroyed) {
++ EntityLookup.this.worldCallback.onDestroyed(entity);
++ }
++ } finally {
++ entity.updatingSectionStatus = false;
++ }
++ } finally {
++ this.world.chunkTaskScheduler.chunkHolderManager.unblockTicketUpdates(ticketBlockBefore);
++ }
++ } finally {
++ if (slices != null) {
++ slices.stopPreventingStatusUpdates(false);
++ }
++ }
++ }
++
++ public void chunkStatusChange(final int x, final int z, final ChunkHolder.FullChunkStatus newStatus) {
++ this.getChunk(x, z).updateStatus(newStatus, this);
++ }
++
++ public void addLegacyChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), true);
++ }
++ }
++
++ public void addEntityChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), true);
++ }
++ }
++
++ public void addWorldGenChunkEntities(final List entities) {
++ for (int i = 0, len = entities.size(); i < len; ++i) {
++ this.addEntity(entities.get(i), false);
++ }
++ }
++
++ public boolean addNewEntity(final Entity entity) {
++ return this.addEntity(entity, false);
++ }
++
++ public static Visibility getEntityStatus(final Entity entity) {
++ if (entity.isAlwaysTicking()) {
++ return Visibility.TICKING;
++ }
++ final ChunkHolder.FullChunkStatus entityStatus = entity.chunkStatus;
++ return Visibility.fromFullChunkStatus(entityStatus == null ? ChunkHolder.FullChunkStatus.INACCESSIBLE : entityStatus);
++ }
++
++ private boolean addEntity(final Entity entity, final boolean fromDisk) {
++ final BlockPos pos = entity.blockPosition();
++ final int sectionX = pos.getX() >> 4;
++ final int sectionY = Mth.clamp(pos.getY() >> 4, this.minSection, this.maxSection);
++ final int sectionZ = pos.getZ() >> 4;
++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot add entity off-main thread");
++
++ if (entity.isRemoved()) {
++ LOGGER.warn("Refusing to add removed entity: " + entity);
++ return false;
++ }
++
++ if (entity.updatingSectionStatus) {
++ LOGGER.warn("Entity " + entity + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable());
++ return false;
++ }
++
++ if (fromDisk) {
++ ChunkSystem.onEntityPreAdd(this.world, entity);
++ if (entity.isRemoved()) {
++ // removed from checkDupeUUID call
++ return false;
++ }
++ }
++
++ this.entityByLock.writeLock();
++ try {
++ if (this.entityById.containsKey(entity.getId())) {
++ LOGGER.warn("Entity id already exists: " + entity.getId() + ", mapped to " + this.entityById.get(entity.getId()) + ", can't add " + entity, new Throwable());
++ return false;
++ }
++ if (this.entityByUUID.containsKey(entity.getUUID())) {
++ LOGGER.warn("Entity uuid already exists: " + entity.getUUID() + ", mapped to " + this.entityByUUID.get(entity.getUUID()) + ", can't add " + entity, new Throwable());
++ return false;
++ }
++ this.entityById.put(entity.getId(), entity);
++ this.entityByUUID.put(entity.getUUID(), entity);
++ } finally {
++ this.entityByLock.tryUnlockWrite();
++ }
++
++ entity.sectionX = sectionX;
++ entity.sectionY = sectionY;
++ entity.sectionZ = sectionZ;
++ final ChunkEntitySlices slices = this.getOrCreateChunk(sectionX, sectionZ);
++ if (!slices.addEntity(entity, sectionY)) {
++ LOGGER.warn("Entity " + entity + " added to world '" + this.world.getWorld().getName() + "', but was already contained in entity chunk (" + sectionX + "," + sectionZ + ")");
++ }
++
++ entity.setLevelCallback(new EntityCallback(entity));
++
++ this.entityStatusChange(entity, slices, Visibility.HIDDEN, getEntityStatus(entity), false, !fromDisk, false);
++
++ return true;
++ }
++
++ private void removeEntity(final Entity entity) {
++ final int sectionX = entity.sectionX;
++ final int sectionY = entity.sectionY;
++ final int sectionZ = entity.sectionZ;
++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot remove entity off-main");
++ if (!entity.isRemoved()) {
++ throw new IllegalStateException("Only call Entity#setRemoved to remove an entity");
++ }
++ final ChunkEntitySlices slices = this.getChunk(sectionX, sectionZ);
++ // all entities should be in a chunk
++ if (slices == null) {
++ LOGGER.warn("Cannot remove entity " + entity + " from null entity slices (" + sectionX + "," + sectionZ + ")");
++ } else {
++ if (!slices.removeEntity(entity, sectionY)) {
++ LOGGER.warn("Failed to remove entity " + entity + " from entity slices (" + sectionX + "," + sectionZ + ")");
++ }
++ }
++ entity.sectionX = entity.sectionY = entity.sectionZ = Integer.MIN_VALUE;
++
++ this.entityByLock.writeLock();
++ try {
++ if (!this.entityById.remove(entity.getId(), entity)) {
++ LOGGER.warn("Failed to remove entity " + entity + " by id, current entity mapped: " + this.entityById.get(entity.getId()));
++ }
++ if (!this.entityByUUID.remove(entity.getUUID(), entity)) {
++ LOGGER.warn("Failed to remove entity " + entity + " by uuid, current entity mapped: " + this.entityByUUID.get(entity.getUUID()));
++ }
++ } finally {
++ this.entityByLock.tryUnlockWrite();
++ }
++ }
++
++ private ChunkEntitySlices moveEntity(final Entity entity) {
++ // ensure we own the entity
++ TickThread.ensureTickThread(entity, "Cannot move entity off-main");
++
++ final BlockPos newPos = entity.blockPosition();
++ final int newSectionX = newPos.getX() >> 4;
++ final int newSectionY = Mth.clamp(newPos.getY() >> 4, this.minSection, this.maxSection);
++ final int newSectionZ = newPos.getZ() >> 4;
++
++ if (newSectionX == entity.sectionX && newSectionY == entity.sectionY && newSectionZ == entity.sectionZ) {
++ return null;
++ }
++
++ // ensure the new section is owned by this tick thread
++ TickThread.ensureTickThread(this.world, newSectionX, newSectionZ, "Cannot move entity off-main");
++
++ // ensure the old section is owned by this tick thread
++ TickThread.ensureTickThread(this.world, entity.sectionX, entity.sectionZ, "Cannot move entity off-main");
++
++ final ChunkEntitySlices old = this.getChunk(entity.sectionX, entity.sectionZ);
++ final ChunkEntitySlices slices = this.getOrCreateChunk(newSectionX, newSectionZ);
++
++ if (!old.removeEntity(entity, entity.sectionY)) {
++ LOGGER.warn("Could not remove entity " + entity + " from its old chunk section (" + entity.sectionX + "," + entity.sectionY + "," + entity.sectionZ + ") since it was not contained in the section");
++ }
++
++ if (!slices.addEntity(entity, newSectionY)) {
++ LOGGER.warn("Could not add entity " + entity + " to its new chunk section (" + newSectionX + "," + newSectionY + "," + newSectionZ + ") as it is already contained in the section");
++ }
++
++ entity.sectionX = newSectionX;
++ entity.sectionY = newSectionY;
++ entity.sectionZ = newSectionZ;
++
++ return slices;
++ }
++
++ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntitiesWithoutDragonParts(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getHardCollidingEntities(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getHardCollidingEntities(except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntities(final EntityType> type, final AABB box, final List super T> into,
++ final Predicate super T> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(type, box, (List)into, (Predicate)predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntities(final Class extends T> clazz, final Entity except, final AABB box, final List super T> into,
++ final Predicate super T> predicate) {
++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
++
++ final int minRegionX = minChunkX >> REGION_SHIFT;
++ final int minRegionZ = minChunkZ >> REGION_SHIFT;
++ final int maxRegionX = maxChunkX >> REGION_SHIFT;
++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
++
++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
++
++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
++
++ if (region == null) {
++ continue;
++ }
++
++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
++
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ continue;
++ }
++
++ chunk.getEntities(clazz, except, box, into, predicate);
++ }
++ }
++ }
++ }
++ }
++
++ public void entitySectionLoad(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot load in entity section off-main");
++ synchronized (this) {
++ final ChunkEntitySlices curr = this.getChunk(chunkX, chunkZ);
++ if (curr != null) {
++ this.removeChunk(chunkX, chunkZ);
++
++ curr.mergeInto(slices);
++
++ this.addChunk(chunkX, chunkZ, slices);
++ } else {
++ this.addChunk(chunkX, chunkZ, slices);
++ }
++ }
++ }
++
++ public void entitySectionUnload(final int chunkX, final int chunkZ) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot unload entity section off-main");
++ this.removeChunk(chunkX, chunkZ);
++ }
++
++ public ChunkEntitySlices getChunk(final int chunkX, final int chunkZ) {
++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ if (region == null) {
++ return null;
++ }
++
++ return region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT));
++ }
++
++ public ChunkEntitySlices getOrCreateChunk(final int chunkX, final int chunkZ) {
++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ ChunkEntitySlices ret;
++ if (region == null || (ret = region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT))) == null) {
++ // loadInEntityChunk will call addChunk for us
++ return this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(chunkX, chunkZ, true);
++ }
++
++ return ret;
++ }
++
++ public ChunkSlicesRegion getRegion(final int regionX, final int regionZ) {
++ final long key = CoordinateUtils.getChunkKey(regionX, regionZ);
++ final long attempt = this.stateLock.tryOptimisticRead();
++ if (attempt != 0L) {
++ try {
++ final ChunkSlicesRegion ret = this.regions.get(key);
++
++ if (this.stateLock.validate(attempt)) {
++ return ret;
++ }
++ } catch (final Error error) {
++ throw error;
++ } catch (final Throwable thr) {
++ // ignore
++ }
++ }
++
++ this.stateLock.readLock();
++ try {
++ return this.regions.get(key);
++ } finally {
++ this.stateLock.tryUnlockRead();
++ }
++ }
++
++ private synchronized void removeChunk(final int chunkX, final int chunkZ) {
++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
++
++ final ChunkSlicesRegion region = this.regions.get(key);
++ final int remaining = region.remove(relIndex);
++
++ if (remaining == 0) {
++ this.stateLock.writeLock();
++ try {
++ this.regions.remove(key);
++ } finally {
++ this.stateLock.tryUnlockWrite();
++ }
++ }
++ }
++
++ public synchronized void addChunk(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
++
++ ChunkSlicesRegion region = this.regions.get(key);
++ if (region != null) {
++ region.add(relIndex, slices);
++ } else {
++ region = new ChunkSlicesRegion();
++ region.add(relIndex, slices);
++ this.stateLock.writeLock();
++ try {
++ this.regions.put(key, region);
++ } finally {
++ this.stateLock.tryUnlockWrite();
++ }
++ }
++ }
++
++ public static final class ChunkSlicesRegion {
++
++ protected final ChunkEntitySlices[] slices = new ChunkEntitySlices[REGION_SIZE * REGION_SIZE];
++ protected int sliceCount;
++
++ public ChunkEntitySlices get(final int index) {
++ return this.slices[index];
++ }
++
++ public int remove(final int index) {
++ final ChunkEntitySlices slices = this.slices[index];
++ if (slices == null) {
++ throw new IllegalStateException();
++ }
++
++ this.slices[index] = null;
++
++ return --this.sliceCount;
++ }
++
++ public void add(final int index, final ChunkEntitySlices slices) {
++ final ChunkEntitySlices curr = this.slices[index];
++ if (curr != null) {
++ throw new IllegalStateException();
++ }
++
++ this.slices[index] = slices;
++
++ ++this.sliceCount;
++ }
++ }
++
++ private final class EntityCallback implements EntityInLevelCallback {
++
++ public final Entity entity;
++
++ public EntityCallback(final Entity entity) {
++ this.entity = entity;
++ }
++
++ @Override
++ public void onMove() {
++ final Entity entity = this.entity;
++ final Visibility oldVisibility = getEntityStatus(entity);
++ final ChunkEntitySlices newSlices = EntityLookup.this.moveEntity(this.entity);
++ if (newSlices == null) {
++ // no new section, so didn't change sections
++ return;
++ }
++ final Visibility newVisibility = getEntityStatus(entity);
++
++ EntityLookup.this.entityStatusChange(entity, newSlices, oldVisibility, newVisibility, true, false, false);
++ }
++
++ @Override
++ public void onRemove(final Entity.RemovalReason reason) {
++ final Entity entity = this.entity;
++ TickThread.ensureTickThread(entity, "Cannot remove entity off-main"); // Paper - rewrite chunk system
++ final Visibility tickingState = EntityLookup.getEntityStatus(entity);
++
++ EntityLookup.this.removeEntity(entity);
++
++ EntityLookup.this.entityStatusChange(entity, null, tickingState, Visibility.HIDDEN, false, false, reason.shouldDestroy());
++
++ this.entity.setLevelCallback(NoOpCallback.INSTANCE);
++ }
++ }
++
++ private static final class NoOpCallback implements EntityInLevelCallback {
++
++ public static final NoOpCallback INSTANCE = new NoOpCallback();
++
++ @Override
++ public void onMove() {}
++
++ @Override
++ public void onRemove(final Entity.RemovalReason reason) {}
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..de137486f610e9042853512f630b9dcc74b29280
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
+@@ -0,0 +1,1328 @@
++package io.papermc.paper.chunk.system.io;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedQueueExecutorThread;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.HashCommon;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.storage.RegionFile;
++import net.minecraft.world.level.chunk.storage.RegionFileStorage;
++import org.slf4j.Logger;
++import java.io.IOException;
++import java.lang.invoke.VarHandle;
++import java.util.concurrent.CompletableFuture;
++import java.util.concurrent.CompletionException;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.function.BiConsumer;
++import java.util.function.BiFunction;
++import java.util.function.Consumer;
++import java.util.function.Function;
++
++/**
++ * Prioritised RegionFile I/O executor, responsible for all RegionFile access.
++ *
++ * All functions provided are MT-Safe, however certain ordering constraints are recommended:
++ *
++ * Chunk saves may not occur for unloaded chunks.
++ *
++ *
++ * Tasks must be scheduled on the chunk scheduler thread.
++ *
++ * By following these constraints, no chunk data loss should occur with the exception of underlying I/O problems.
++ *
++ */
++public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ /**
++ * The kinds of region files controlled by the region file thread. Add more when needed, and ensure
++ * getControllerFor is updated.
++ */
++ public static enum RegionFileType {
++ CHUNK_DATA,
++ POI_DATA,
++ ENTITY_DATA;
++ }
++
++ protected static final RegionFileType[] CACHED_REGIONFILE_TYPES = RegionFileType.values();
++
++ private ChunkDataController getControllerFor(final ServerLevel world, final RegionFileType type) {
++ switch (type) {
++ case CHUNK_DATA:
++ return world.chunkDataControllerNew;
++ case POI_DATA:
++ return world.poiDataControllerNew;
++ case ENTITY_DATA:
++ return world.entityDataControllerNew;
++ default:
++ throw new IllegalStateException("Unknown controller type " + type);
++ }
++ }
++
++ /**
++ * Collects regionfile data for a certain chunk.
++ */
++ public static final class RegionFileData {
++
++ private final boolean[] hasResult = new boolean[CACHED_REGIONFILE_TYPES.length];
++ private final CompoundTag[] data = new CompoundTag[CACHED_REGIONFILE_TYPES.length];
++ private final Throwable[] throwables = new Throwable[CACHED_REGIONFILE_TYPES.length];
++
++ /**
++ * Sets the result associated with the specified regionfile type. Note that
++ * results can only be set once per regionfile type.
++ *
++ * @param type The regionfile type.
++ * @param data The result to set.
++ */
++ public void setData(final RegionFileType type, final CompoundTag data) {
++ final int index = type.ordinal();
++
++ if (this.hasResult[index]) {
++ throw new IllegalArgumentException("Result already exists for type " + type);
++ }
++ this.hasResult[index] = true;
++ this.data[index] = data;
++ }
++
++ /**
++ * Sets the result associated with the specified regionfile type. Note that
++ * results can only be set once per regionfile type.
++ *
++ * @param type The regionfile type.
++ * @param throwable The result to set.
++ */
++ public void setThrowable(final RegionFileType type, final Throwable throwable) {
++ final int index = type.ordinal();
++
++ if (this.hasResult[index]) {
++ throw new IllegalArgumentException("Result already exists for type " + type);
++ }
++ this.hasResult[index] = true;
++ this.throwables[index] = throwable;
++ }
++
++ /**
++ * Returns whether there is a result for the specified regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @return Whether a result exists for {@code type}.
++ */
++ public boolean hasResult(final RegionFileType type) {
++ return this.hasResult[type.ordinal()];
++ }
++
++ /**
++ * Returns the data result for the regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @throws IllegalArgumentException If the result has not been set for {@code type}.
++ * @return The data result for the specified type. If the result is a {@code Throwable},
++ * then returns {@code null}.
++ */
++ public CompoundTag getData(final RegionFileType type) {
++ final int index = type.ordinal();
++
++ if (!this.hasResult[index]) {
++ throw new IllegalArgumentException("Result does not exist for type " + type);
++ }
++
++ return this.data[index];
++ }
++
++ /**
++ * Returns the throwable result for the regionfile type.
++ *
++ * @param type Specified regionfile type.
++ *
++ * @throws IllegalArgumentException If the result has not been set for {@code type}.
++ * @return The throwable result for the specified type. If the result is an {@code CompoundTag},
++ * then returns {@code null}.
++ */
++ public Throwable getThrowable(final RegionFileType type) {
++ final int index = type.ordinal();
++
++ if (!this.hasResult[index]) {
++ throw new IllegalArgumentException("Result does not exist for type " + type);
++ }
++
++ return this.throwables[index];
++ }
++ }
++
++ private static final Object INIT_LOCK = new Object();
++
++ static RegionFileIOThread[] threads;
++
++ /* needs to be consistent given a set of parameters */
++ static RegionFileIOThread selectThread(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ if (threads == null) {
++ throw new IllegalStateException("Threads not initialised");
++ }
++
++ final int regionX = chunkX >> 5;
++ final int regionZ = chunkZ >> 5;
++ final int typeOffset = type.ordinal();
++
++ return threads[(System.identityHashCode(world) + regionX + regionZ + typeOffset) % threads.length];
++ }
++
++ /**
++ * Shuts down the I/O executor(s). Watis for all tasks to complete if specified.
++ * Tasks queued during this call might not be accepted, and tasks queued after will not be accepted.
++ *
++ * @param wait Whether to wait until all tasks have completed.
++ */
++ public static void close(final boolean wait) {
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ threads[i].close(false, true);
++ }
++ if (wait) {
++ RegionFileIOThread.flush();
++ }
++ }
++
++ public static long[] getExecutedTasks() {
++ final long[] ret = new long[threads.length];
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ ret[i] = threads[i].getTotalTasksExecuted();
++ }
++
++ return ret;
++ }
++
++ public static long[] getTasksScheduled() {
++ final long[] ret = new long[threads.length];
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ ret[i] = threads[i].getTotalTasksScheduled();
++ }
++ return ret;
++ }
++
++ public static void flush() {
++ for (int i = 0, len = threads.length; i < len; ++i) {
++ threads[i].waitUntilAllExecuted();
++ }
++ }
++
++ public static void partialFlush(final int totalTasksRemaining) {
++ long failures = 1L; // start out at 0.25ms
++
++ for (;;) {
++ final long[] executed = getExecutedTasks();
++ final long[] scheduled = getTasksScheduled();
++
++ long sum = 0;
++ for (int i = 0; i < executed.length; ++i) {
++ sum += scheduled[i] - executed[i];
++ }
++
++ if (sum <= totalTasksRemaining) {
++ break;
++ }
++
++ failures = ConcurrentUtil.linearLongBackoff(failures, 250_000L, 5_000_000L); // 500us, 5ms
++ }
++ }
++
++ /**
++ * Inits the executor with the specified number of threads.
++ *
++ * @param threads Specified number of threads.
++ */
++ public static void init(final int threads) {
++ synchronized (INIT_LOCK) {
++ if (RegionFileIOThread.threads != null) {
++ throw new IllegalStateException("Already initialised threads");
++ }
++
++ RegionFileIOThread.threads = new RegionFileIOThread[threads];
++
++ for (int i = 0; i < threads; ++i) {
++ RegionFileIOThread.threads[i] = new RegionFileIOThread(i);
++ RegionFileIOThread.threads[i].start();
++ }
++ }
++ }
++
++ private RegionFileIOThread(final int threadNumber) {
++ super(new PrioritisedThreadedTaskQueue(), (int)(1.0e6)); // 1.0ms spinwait time
++ this.setName("RegionFile I/O Thread #" + threadNumber);
++ this.setPriority(Thread.NORM_PRIORITY - 2); // we keep priority close to normal because threads can wait on us
++ this.setUncaughtExceptionHandler((final Thread thread, final Throwable thr) -> {
++ LOGGER.error("Uncaught exception thrown from I/O thread, report this! Thread: " + thread.getName(), thr);
++ });
++ }
++
++ /**
++ * Returns whether the current thread is a regionfile I/O executor.
++ * @return Whether the current thread is a regionfile I/O executor.
++ */
++ public static boolean isRegionFileThread() {
++ return Thread.currentThread() instanceof RegionFileIOThread;
++ }
++
++ /**
++ * Returns the priority associated with blocking I/O based on the current thread. The goal is to avoid
++ * dumb plugins from taking away priority from threads we consider crucial.
++ * @return The priroity to use with blocking I/O on the current thread.
++ */
++ public static PrioritisedExecutor.Priority getIOBlockingPriorityForCurrentThread() {
++ if (TickThread.isTickThread()) {
++ return PrioritisedExecutor.Priority.BLOCKING;
++ }
++ return PrioritisedExecutor.Priority.HIGHEST;
++ }
++
++ /**
++ * Returns the current {@code CompoundTag} pending for write for the specified chunk & regionfile type.
++ * Note that this does not copy the result, so do not modify the result returned.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ *
++ * @return The compound tag associated for the specified chunk. {@code null} if no write was pending, or if {@code null} is the write pending.
++ */
++ public static CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.getPendingWriteInternal(world, chunkX, chunkZ, type);
++ }
++
++ CompoundTag getPendingWriteInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task == null) {
++ return null;
++ }
++
++ final CompoundTag ret = task.inProgressWrite;
++
++ return ret == ChunkDataTask.NOTHING_TO_WRITE ? null : ret;
++ }
++
++ /**
++ * Returns the priority for the specified regionfile type for the specified chunk.
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @return The priority for the chunk
++ */
++ public static PrioritisedExecutor.Priority getPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.getPriorityInternal(world, chunkX, chunkZ, type);
++ }
++
++ PrioritisedExecutor.Priority getPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task == null) {
++ return PrioritisedExecutor.Priority.COMPLETING;
++ }
++
++ return task.prioritisedTask.getPriority();
++ }
++
++ /**
++ * Sets the priority for all regionfile types for the specified chunk. Note that great care should
++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
++ * priorities.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.setPriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Sets the priority for the specified regionfile type for the specified chunk. Note that great care should
++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
++ * priorities.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.setPriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void setPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.setPriority(priority);
++ }
++ }
++
++ /**
++ * Raises the priority for all regionfile types for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.raisePriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Raises the priority for the specified regionfile type for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, Priority)
++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.raisePriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void raisePriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.raisePriority(priority);
++ }
++ }
++
++ /**
++ * Lowers the priority for all regionfile types for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ,
++ final PrioritisedExecutor.Priority priority) {
++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
++ RegionFileIOThread.lowerPriority(world, chunkX, chunkZ, type, priority);
++ }
++ }
++
++ /**
++ * Lowers the priority for the specified regionfile type for the specified chunk.
++ *
++ * @param world Specified world.
++ * @param chunkX Specified chunk x.
++ * @param chunkZ Specified chunk z.
++ * @param type Specified regionfile type.
++ * @param priority New priority.
++ *
++ * @see #raisePriority(ServerLevel, int, int, Priority)
++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
++ * @see #setPriority(ServerLevel, int, int, Priority)
++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
++ */
++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.lowerPriorityInternal(world, chunkX, chunkZ, type, priority);
++ }
++
++ void lowerPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
++
++ if (task != null) {
++ task.prioritisedTask.lowerPriority(priority);
++ }
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * Impl notes:
++ *
++ *
++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.
++ *
++ *
++ * Writes may be called concurrently, although only the "later" write will go through.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param data Chunk's data
++ * @param type The regionfile type to write to.
++ *
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type) {
++ RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, data, type, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules the chunk data to be written asynchronously.
++ *
++ * Impl notes:
++ *
++ *
++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
++ * saves must be scheduled before a chunk is unloaded.
++ *
++ *
++ * Writes may be called concurrently, although only the "later" write will go through.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param data Chunk's data
++ * @param type The regionfile type to write to.
++ * @param priority The minimum priority to schedule at.
++ *
++ * @throws IllegalStateException If the file io thread has shutdown.
++ */
++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type, final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ thread.scheduleSaveInternal(world, chunkX, chunkZ, data, type, priority);
++ }
++
++ void scheduleSaveInternal(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
++ final RegionFileType type, final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++
++ final boolean[] created = new boolean[1];
++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ final ChunkDataTask task = taskController.tasks.compute(key, (final ChunkCoordinate keyInMap, final ChunkDataTask taskRunning) -> {
++ if (taskRunning == null || taskRunning.failedWrite) {
++ // no task is scheduled or the previous write failed - meaning we need to overwrite it
++
++ // create task
++ final ChunkDataTask newTask = new ChunkDataTask(world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority);
++ newTask.inProgressWrite = data;
++ created[0] = true;
++
++ return newTask;
++ }
++
++ taskRunning.inProgressWrite = data;
++
++ return taskRunning;
++ });
++
++ if (created[0]) {
++ task.prioritisedTask.queue();
++ } else {
++ task.prioritisedTask.raisePriority(priority);
++ }
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
++ * for single load.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ */
++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++                                               final Consumer<RegionFileData> onComplete, final boolean intendingToBlock) {
++ return RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
++ * for single load.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param priority The minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ */
++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++                                               final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final PrioritisedExecutor.Priority priority) {
++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, priority, CACHED_REGIONFILE_TYPES);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
++ * for single load.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param types The regionfile type(s) to load.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++                                            final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final RegionFileType... types) {
++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL, types);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
++ * for single load.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param types The regionfile type(s) to load.
++ * @param priority The minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
++                                            final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
++ final PrioritisedExecutor.Priority priority, final RegionFileType... types) {
++ if (types == null) {
++ throw new NullPointerException("Types cannot be null");
++ }
++ if (types.length == 0) {
++ throw new IllegalArgumentException("Types cannot be empty");
++ }
++
++ final RegionFileData ret = new RegionFileData();
++
++ final Cancellable[] reads = new CancellableRead[types.length];
++ final AtomicInteger completions = new AtomicInteger();
++ final int expectedCompletions = types.length;
++
++ for (int i = 0; i < expectedCompletions; ++i) {
++ final RegionFileType type = types[i];
++ reads[i] = RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type,
++ (final CompoundTag data, final Throwable throwable) -> {
++ if (throwable != null) {
++ ret.setThrowable(type, throwable);
++ } else {
++ ret.setData(type, data);
++ }
++
++ if (completions.incrementAndGet() == expectedCompletions) {
++ onComplete.accept(ret);
++ }
++ }, intendingToBlock, priority);
++ }
++
++ return new CancellableReads(reads);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
++ * {@code onComplete}.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++                                           final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock) {
++ return RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
++ * {@code onComplete}.
++ *
++ * Impl notes:
++ *
++ *
++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
++ * data is undefined behaviour, and can cause deadlock.
++ *
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param onComplete Consumer to execute once this task has completed
++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
++ * of this call.
++ * @param priority Minimum priority to load the data at.
++ *
++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
++ *
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
++ */
++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
++                                           final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
++ return thread.loadDataAsyncInternal(world, chunkX, chunkZ, type, onComplete, intendingToBlock, priority);
++ }
++
++ private static Boolean doesRegionFileExist(final int chunkX, final int chunkZ, final boolean intendingToBlock,
++ final ChunkDataController taskController) {
++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
++ if (intendingToBlock) {
++ return taskController.computeForRegionFile(chunkX, chunkZ, true, (final RegionFile file) -> {
++ if (file == null) { // null if no regionfile exists
++ return Boolean.FALSE;
++ }
++
++ return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
++ });
++ } else {
++ return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> {
++ if (file == null) { // null if not loaded
++ return Boolean.TRUE;
++ }
++
++ return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
++ });
++ }
++ }
++
++ Cancellable loadDataAsyncInternal(final ServerLevel world, final int chunkX, final int chunkZ,
++                                      final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
++ final ChunkDataController taskController = this.getControllerFor(world, type);
++
++ final ImmediateCallbackCompletion callbackInfo = new ImmediateCallbackCompletion();
++
++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++        final BiFunction<ChunkCoordinate, ChunkDataTask, ChunkDataTask> compute = (final ChunkCoordinate keyInMap, final ChunkDataTask running) -> {
++ if (running == null) {
++ // not scheduled
++
++ if (callbackInfo.regionFileCalculation == null) {
++ // caller will compute this outside of compute(), to avoid holding the bin lock
++ callbackInfo.needsRegionFileTest = true;
++ return null;
++ }
++
++ if (callbackInfo.regionFileCalculation == Boolean.FALSE) {
++ // not on disk
++ callbackInfo.data = null;
++ callbackInfo.throwable = null;
++ callbackInfo.completeNow = true;
++ return null;
++ }
++
++ // set up task
++ final ChunkDataTask newTask = new ChunkDataTask(
++ world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority
++ );
++ newTask.inProgressRead = new RegionFileIOThread.InProgressRead();
++ newTask.inProgressRead.waiters.add(onComplete);
++
++ callbackInfo.tasksNeedsScheduling = true;
++ return newTask;
++ }
++
++ final CompoundTag pendingWrite = running.inProgressWrite;
++
++ if (pendingWrite == ChunkDataTask.NOTHING_TO_WRITE) {
++ // need to add to waiters here, because the regionfile thread will use compute() to lock and check for cancellations
++ if (!running.inProgressRead.addToWaiters(onComplete)) {
++ callbackInfo.data = running.inProgressRead.value;
++ callbackInfo.throwable = running.inProgressRead.throwable;
++ callbackInfo.completeNow = true;
++ }
++ return running;
++ }
++ // using the result sync here - don't bump priority
++
++ // at this stage we have to use the in progress write's data to avoid an order issue
++ callbackInfo.data = pendingWrite;
++ callbackInfo.throwable = null;
++ callbackInfo.completeNow = true;
++ return running;
++ };
++
++ ChunkDataTask curr = taskController.tasks.get(key);
++ if (curr == null) {
++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
++ }
++ ChunkDataTask ret = taskController.tasks.compute(key, compute);
++ if (callbackInfo.needsRegionFileTest) {
++ // curr isn't null but when we went into compute() it was
++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
++ // now it should be fine
++ ret = taskController.tasks.compute(key, compute);
++ }
++
++ // needs to be scheduled
++ if (callbackInfo.tasksNeedsScheduling) {
++ ret.prioritisedTask.queue();
++ } else if (callbackInfo.completeNow) {
++ try {
++ onComplete.accept(callbackInfo.data, callbackInfo.throwable);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(onComplete) + " synchronously failed to handle chunk data for task " + ret.toString(), thr);
++ }
++ } else {
++ // we're waiting on a task we didn't schedule, so raise its priority to what we want
++ ret.prioritisedTask.raisePriority(priority);
++ }
++
++ return new CancellableRead(onComplete, ret);
++ }
++
++ /**
++ * Schedules a load task to be executed asynchronously, and blocks on that task.
++ *
++ * @param world Chunk's world
++ * @param chunkX Chunk's x coordinate
++ * @param chunkZ Chunk's z coordinate
++ * @param type Regionfile type
++ * @param priority Minimum priority to load the data at.
++ *
++ * @return The chunk data for the chunk. Note that a {@code null} result means the chunk or regionfile does not exist on disk.
++ *
++ * @throws IOException If the load fails for any reason
++ */
++ public static CompoundTag loadData(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
++ final PrioritisedExecutor.Priority priority) throws IOException {
++        final CompletableFuture<CompoundTag> ret = new CompletableFuture<>();
++
++ RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, (final CompoundTag compound, final Throwable thr) -> {
++ if (thr != null) {
++ ret.completeExceptionally(thr);
++ } else {
++ ret.complete(compound);
++ }
++ }, true, priority);
++
++ try {
++ return ret.join();
++ } catch (final CompletionException ex) {
++ throw new IOException(ex);
++ }
++ }
++
++ private static final class ImmediateCallbackCompletion {
++
++ public CompoundTag data;
++ public Throwable throwable;
++ public boolean completeNow;
++ public boolean tasksNeedsScheduling;
++ public boolean needsRegionFileTest;
++ public Boolean regionFileCalculation;
++
++ }
++
++ static final class CancellableRead implements Cancellable {
++
++        private BiConsumer<CompoundTag, Throwable> callback;
++ private RegionFileIOThread.ChunkDataTask task;
++
++        CancellableRead(final BiConsumer<CompoundTag, Throwable> callback, final RegionFileIOThread.ChunkDataTask task) {
++ this.callback = callback;
++ this.task = task;
++ }
++
++ @Override
++ public boolean cancel() {
++            final BiConsumer<CompoundTag, Throwable> callback = this.callback;
++ final RegionFileIOThread.ChunkDataTask task = this.task;
++
++ if (callback == null || task == null) {
++ return false;
++ }
++
++ this.callback = null;
++ this.task = null;
++
++ final RegionFileIOThread.InProgressRead read = task.inProgressRead;
++
++ // read can be null if no read was scheduled (i.e no regionfile existed or chunk in regionfile didn't)
++ return (read != null && read.waiters.remove(callback));
++ }
++ }
++
++ static final class CancellableReads implements Cancellable {
++
++ private Cancellable[] reads;
++
++ protected static final VarHandle READS_HANDLE = ConcurrentUtil.getVarHandle(CancellableReads.class, "reads", Cancellable[].class);
++
++ CancellableReads(final Cancellable[] reads) {
++ this.reads = reads;
++ }
++
++ @Override
++ public boolean cancel() {
++ final Cancellable[] reads = (Cancellable[])READS_HANDLE.getAndSet((CancellableReads)this, (Cancellable[])null);
++
++ if (reads == null) {
++ return false;
++ }
++
++ boolean ret = false;
++
++ for (final Cancellable read : reads) {
++ ret |= read.cancel();
++ }
++
++ return ret;
++ }
++ }
++
++ static final class InProgressRead {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ CompoundTag value;
++ Throwable throwable;
++        final MultiThreadedQueue<BiConsumer<CompoundTag, Throwable>> waiters = new MultiThreadedQueue<>();
++
++ // rets false if already completed (callback not invoked), true if callback was added
++        boolean addToWaiters(final BiConsumer<CompoundTag, Throwable> callback) {
++ return this.waiters.add(callback);
++ }
++
++ void complete(final RegionFileIOThread.ChunkDataTask task, final CompoundTag value, final Throwable throwable) {
++ this.value = value;
++ this.throwable = throwable;
++
++            BiConsumer<CompoundTag, Throwable> consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ try {
++ consumer.accept(value, throwable);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(consumer) + " failed to handle chunk data for task " + task.toString(), thr);
++ }
++ }
++ }
++ }
++
++ /**
++ * Class exists to replace {@link Long} usages as keys inside non-fastutil hashtables. The hash for some Long {@code x}
++ * is defined as {@code (x >>> 32) ^ x}. Chunk keys as long values are defined as {@code ((chunkX & 0xFFFFFFFFL) | (chunkZ << 32))},
++     * which means the hashcode as a Long value will be {@code chunkX ^ chunkZ}. Given that most chunks are created within a radius around players,
++ * this will lead to many hash collisions. So, this class uses a better hashing algorithm so that usage of
++ * non-fastutil collections is not degraded.
++ */
++    public static final class ChunkCoordinate implements Comparable<ChunkCoordinate> {
++
++ public final long key;
++
++ public ChunkCoordinate(final long key) {
++ this.key = key;
++ }
++
++ @Override
++ public int hashCode() {
++ return (int)HashCommon.mix(this.key);
++ }
++
++ @Override
++ public boolean equals(final Object obj) {
++ if (this == obj) {
++ return true;
++ }
++
++ if (!(obj instanceof ChunkCoordinate)) {
++ return false;
++ }
++
++ final ChunkCoordinate other = (ChunkCoordinate)obj;
++
++ return this.key == other.key;
++ }
++
++ // This class is intended for HashMap/ConcurrentHashMap usage, which do treeify bin nodes if the chain
++ // is too large. So we should implement compareTo to help.
++ @Override
++ public int compareTo(final RegionFileIOThread.ChunkCoordinate other) {
++ return Long.compare(this.key, other.key);
++ }
++
++ @Override
++ public String toString() {
++ return new ChunkPos(this.key).toString();
++ }
++ }
++
++ public static abstract class ChunkDataController {
++
++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding.
++        protected final ConcurrentHashMap<ChunkCoordinate, ChunkDataTask> tasks = new ConcurrentHashMap<>(8192, 0.10f);
++
++ public final RegionFileType type;
++
++ public ChunkDataController(final RegionFileType type) {
++ this.type = type;
++ }
++
++ public abstract RegionFileStorage getCache();
++
++ public abstract void writeData(final int chunkX, final int chunkZ, final CompoundTag compound) throws IOException;
++
++ public abstract CompoundTag readData(final int chunkX, final int chunkZ) throws IOException;
++
++ public boolean hasTasks() {
++ return !this.tasks.isEmpty();
++ }
++
++        public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
++ final RegionFileStorage cache = this.getCache();
++ final RegionFile regionFile;
++ synchronized (cache) {
++ try {
++ regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true);
++ } catch (final IOException ex) {
++ throw new RuntimeException(ex);
++ }
++ }
++
++ try {
++ return function.apply(regionFile);
++ } finally {
++ if (regionFile != null) {
++ regionFile.fileLock.unlock();
++ }
++ }
++ }
++
++        public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function) {
++ final RegionFileStorage cache = this.getCache();
++ final RegionFile regionFile;
++
++ synchronized (cache) {
++ regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ));
++ if (regionFile != null) {
++ regionFile.fileLock.lock();
++ }
++ }
++
++ try {
++ return function.apply(regionFile);
++ } finally {
++ if (regionFile != null) {
++ regionFile.fileLock.unlock();
++ }
++ }
++ }
++ }
++
++ static final class ChunkDataTask implements Runnable {
++
++ protected static final CompoundTag NOTHING_TO_WRITE = new CompoundTag();
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ RegionFileIOThread.InProgressRead inProgressRead;
++ volatile CompoundTag inProgressWrite = NOTHING_TO_WRITE; // only needs to be acquire/release
++
++ boolean failedWrite;
++
++ final ServerLevel world;
++ final int chunkX;
++ final int chunkZ;
++ final RegionFileIOThread.ChunkDataController taskController;
++
++ final PrioritisedExecutor.PrioritisedTask prioritisedTask;
++
++ /*
++ * IO thread will perform reads before writes for a given chunk x and z
++ *
++ * How reads/writes are scheduled:
++ *
++ * If read is scheduled while scheduling write, take no special action and just schedule write
++ * If read is scheduled while scheduling read and no write is scheduled, chain the read task
++ *
++ *
++ * If write is scheduled while scheduling read, use the pending write data and ret immediately (so no read is scheduled)
++ * If write is scheduled while scheduling write (ignore read in progress), overwrite the write in progress data
++ *
++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
++ * it fails to properly propagate write failures thanks to writes overwriting each other
++ */
++
++ public ChunkDataTask(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileIOThread.ChunkDataController taskController,
++ final PrioritisedExecutor executor, final PrioritisedExecutor.Priority priority) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.taskController = taskController;
++ this.prioritisedTask = executor.createTask(this, priority);
++ }
++
++ @Override
++ public String toString() {
++ return "Task for world: '" + this.world.getWorld().getName() + "' at (" + this.chunkX + "," + this.chunkZ +
++ ") type: " + this.taskController.type.name() + ", hash: " + this.hashCode();
++ }
++
++ @Override
++ public void run() {
++ final RegionFileIOThread.InProgressRead read = this.inProgressRead;
++ final ChunkCoordinate chunkKey = new ChunkCoordinate(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ));
++
++ if (read != null) {
++ final boolean[] canRead = new boolean[] { true };
++
++ if (read.waiters.isEmpty()) {
++ // cancelled read? go to task controller to confirm
++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++
++ if (!read.waiters.isEmpty()) { // as per usual IntelliJ is unable to figure out that there are concurrent accesses.
++ return valueInMap;
++ } else {
++ canRead[0] = false;
++ }
++
++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ // read is cancelled - and no write pending, so we're done
++ return;
++ }
++ // if there is a write in progress, we don't actually have to worry about waiters gaining new entries -
++ // the readers will just use the in progress write, so the value in canRead is good to use without
++ // further synchronisation.
++ }
++
++ if (canRead[0]) {
++ CompoundTag compound = null;
++ Throwable throwable = null;
++
++ try {
++ compound = this.taskController.readData(this.chunkX, this.chunkZ);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ throwable = thr;
++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
++ }
++ read.complete(this, compound, throwable);
++ }
++ }
++
++ CompoundTag write = this.inProgressWrite;
++
++ if (write == NOTHING_TO_WRITE) {
++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
++ });
++
++ if (inMap == null) {
++ return; // set the task value to null, indicating we're done
++ } // else: inProgressWrite changed, so now we have something to write
++ }
++
++ for (;;) {
++ write = this.inProgressWrite;
++ final CompoundTag dataWritten = write;
++
++ boolean failedWrite = false;
++
++ try {
++ this.taskController.writeData(this.chunkX, this.chunkZ, write);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ if (thr instanceof RegionFileStorage.RegionFileSizeException) {
++ final int maxSize = RegionFile.MAX_CHUNK_SIZE / (1024 * 1024);
++ LOGGER.error("Chunk at (" + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "' exceeds max size of " + maxSize + "MiB, it has been deleted from disk.");
++ } else {
++ failedWrite = thr instanceof IOException;
++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
++ }
++ }
++
++ final boolean finalFailWrite = failedWrite;
++ final boolean[] done = new boolean[] { false };
++
++ this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
++ if (valueInMap == null) {
++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
++ }
++ if (valueInMap != ChunkDataTask.this) {
++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
++ }
++ if (valueInMap.inProgressWrite == dataWritten) {
++ valueInMap.failedWrite = finalFailWrite;
++ done[0] = true;
++ // keep the data in map if we failed the write so we can try to prevent data loss
++ return finalFailWrite ? valueInMap : null;
++ }
++ // different data than expected, means we need to retry write
++ return valueInMap;
++ });
++
++ if (done[0]) {
++ return;
++ }
++
++ // fetch & write new data
++ continue;
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
+@@ -0,0 +1,280 @@
++package io.papermc.paper.chunk.system.light;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.starlight.common.light.BlockStarLightEngine;
++import ca.spottedleaf.starlight.common.light.SkyStarLightEngine;
++import ca.spottedleaf.starlight.common.light.StarLightInterface;
++import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler;
++import io.papermc.paper.util.CoordinateUtils;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.shorts.ShortCollection;
++import it.unimi.dsi.fastutil.shorts.ShortOpenHashSet;
++import net.minecraft.core.BlockPos;
++import net.minecraft.core.SectionPos;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import java.util.ArrayList;
++import java.util.HashSet;
++import java.util.List;
++import java.util.Set;
++import java.util.concurrent.CompletableFuture;
++import java.util.function.BooleanSupplier;
++
++public final class LightQueue {
++
++ protected final Long2ObjectOpenHashMap<ChunkTasks> chunkTasks = new Long2ObjectOpenHashMap<>();
++ protected final StarLightInterface manager;
++ protected final ServerLevel world;
++
++ public LightQueue(final StarLightInterface manager) {
++ this.manager = manager;
++ this.world = ((ServerLevel)manager.getWorld());
++ }
++
++ public void lowerPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.lowerPriority(priority);
++ }
++ }
++
++ public void setPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.setPriority(priority);
++ }
++ }
++
++ public void raisePriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ task.raisePriority(priority);
++ }
++ }
++
++ public PrioritisedExecutor.Priority getPriority(final int chunkX, final int chunkZ) {
++ final ChunkTasks task;
++ synchronized (this) {
++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++ if (task != null) {
++ return task.getPriority();
++ }
++
++ return PrioritisedExecutor.Priority.COMPLETING;
++ }
++
++ public boolean isEmpty() {
++ synchronized (this) {
++ return this.chunkTasks.isEmpty();
++ }
++ }
++
++ public CompletableFuture<Void> queueBlockChange(final BlockPos pos) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++ tasks.changedPositions.add(pos.immutable());
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueSectionChange(final SectionPos pos, final boolean newEmptyValue) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ if (tasks.changedSectionSet == null) {
++ tasks.changedSectionSet = new Boolean[this.manager.maxSection - this.manager.minSection + 1];
++ }
++ tasks.changedSectionSet[pos.getY() - this.manager.minSection] = Boolean.valueOf(newEmptyValue);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkLightTask(final ChunkPos pos, final BooleanSupplier lightTask, final PrioritisedExecutor.Priority priority) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this, priority);
++ });
++ if (tasks.lightTasks == null) {
++ tasks.lightTasks = new ArrayList<>();
++ }
++ tasks.lightTasks.add(lightTask);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkSkylightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksSky;
++ if (queuedEdges == null) {
++ queuedEdges = tasks.queuedEdgeChecksSky = new ShortOpenHashSet();
++ }
++ queuedEdges.addAll(sections);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public CompletableFuture<Void> queueChunkBlocklightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
++ final ChunkTasks tasks;
++
++ synchronized (this) {
++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
++ });
++
++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksBlock;
++ if (queuedEdges == null) {
++ queuedEdges = tasks.queuedEdgeChecksBlock = new ShortOpenHashSet();
++ }
++ queuedEdges.addAll(sections);
++ }
++
++ tasks.schedule();
++
++ return tasks.onComplete;
++ }
++
++ public void removeChunk(final ChunkPos pos) {
++ final ChunkTasks tasks;
++ synchronized (this) {
++ tasks = this.chunkTasks.remove(CoordinateUtils.getChunkKey(pos));
++ }
++ if (tasks != null && tasks.cancel()) {
++ tasks.onComplete.complete(null);
++ }
++ }
++
++ protected static final class ChunkTasks implements Runnable {
++
++ final Set<BlockPos> changedPositions = new HashSet<>();
++ Boolean[] changedSectionSet;
++ ShortOpenHashSet queuedEdgeChecksSky;
++ ShortOpenHashSet queuedEdgeChecksBlock;
++ List<BooleanSupplier> lightTasks;
++
++ final CompletableFuture<Void> onComplete = new CompletableFuture<>();
++
++ public final long chunkCoordinate;
++ private final StarLightInterface lightEngine;
++ private final LightQueue queue;
++ private final PrioritisedExecutor.PrioritisedTask task;
++
++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue) {
++ this(chunkCoordinate, lightEngine, queue, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue,
++ final PrioritisedExecutor.Priority priority) {
++ this.chunkCoordinate = chunkCoordinate;
++ this.lightEngine = lightEngine;
++ this.queue = queue;
++ this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority);
++ }
++
++ public void schedule() {
++ this.task.queue();
++ }
++
++ public boolean cancel() {
++ return this.task.cancel();
++ }
++
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.task.getPriority();
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ this.task.lowerPriority(priority);
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ this.task.setPriority(priority);
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ this.task.raisePriority(priority);
++ }
++
++ @Override
++ public void run() {
++ final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
++ final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
++ try {
++ synchronized (this.queue) {
++ this.queue.chunkTasks.remove(this.chunkCoordinate);
++ }
++
++ boolean litChunk = false;
++ if (this.lightTasks != null) {
++ for (final BooleanSupplier run : this.lightTasks) {
++ if (run.getAsBoolean()) {
++ litChunk = true;
++ break;
++ }
++ }
++ }
++
++ final long coordinate = this.chunkCoordinate;
++ final int chunkX = CoordinateUtils.getChunkX(coordinate);
++ final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
++
++ final Set<BlockPos> positions = this.changedPositions;
++ final Boolean[] sectionChanges = this.changedSectionSet;
++
++ if (!litChunk) {
++ if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
++ skyEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
++ }
++ if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
++ blockEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
++ }
++
++ if (skyEngine != null && this.queuedEdgeChecksSky != null) {
++ skyEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksSky);
++ }
++ if (blockEngine != null && this.queuedEdgeChecksBlock != null) {
++ blockEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksBlock);
++ }
++ }
++
++ this.onComplete.complete(null);
++ } finally {
++ this.lightEngine.releaseSkyLightEngine(skyEngine);
++ this.lightEngine.releaseBlockLightEngine(blockEngine);
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..b1bdd79044c00635c836dbed327526136ca4bd4e
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
+@@ -0,0 +1,211 @@
++package io.papermc.paper.chunk.system.poi;
++
++import com.mojang.logging.LogUtils;
++import com.mojang.serialization.Codec;
++import com.mojang.serialization.DataResult;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import net.minecraft.SharedConstants;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.nbt.NbtOps;
++import net.minecraft.nbt.Tag;
++import net.minecraft.resources.RegistryOps;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.entity.ai.village.poi.PoiManager;
++import net.minecraft.world.entity.ai.village.poi.PoiSection;
++import org.slf4j.Logger;
++
++import java.util.Optional;
++
++public final class PoiChunk {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public final ServerLevel world;
++ public final int chunkX;
++ public final int chunkZ;
++ public final int minSection;
++ public final int maxSection;
++
++ protected final PoiSection[] sections;
++
++ private boolean isDirty;
++ private boolean loaded;
++
++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection) {
++ this(world, chunkX, chunkZ, minSection, maxSection, new PoiSection[maxSection - minSection + 1]);
++ }
++
++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection, final PoiSection[] sections) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.minSection = minSection;
++ this.maxSection = maxSection;
++ this.sections = sections;
++ if (this.sections.length != (maxSection - minSection + 1)) {
++ throw new IllegalStateException("Incorrect length used, expected " + (maxSection - minSection + 1) + ", got " + this.sections.length);
++ }
++ }
++
++ public void load() {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Loading in poi chunk off-main");
++ if (this.loaded) {
++ return;
++ }
++ this.loaded = true;
++ this.world.chunkSource.getPoiManager().loadInPoiChunk(this);
++ }
++
++ public boolean isLoaded() {
++ return this.loaded;
++ }
++
++ public boolean isEmpty() {
++ for (final PoiSection section : this.sections) {
++ if (section != null && !section.isEmpty()) {
++ return false;
++ }
++ }
++
++ return true;
++ }
++
++ public PoiSection getOrCreateSection(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ final int idx = chunkY - this.minSection;
++ final PoiSection ret = this.sections[idx];
++ if (ret != null) {
++ return ret;
++ }
++
++ final PoiManager poiManager = this.world.getPoiManager();
++ final long key = CoordinateUtils.getChunkSectionKey(this.chunkX, chunkY, this.chunkZ);
++
++ return this.sections[idx] = new PoiSection(() -> {
++ poiManager.setDirty(key);
++ });
++ }
++ throw new IllegalArgumentException("chunkY is out of bounds, chunkY: " + chunkY + " outside [" + this.minSection + "," + this.maxSection + "]");
++ }
++
++ public PoiSection getSection(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ return this.sections[chunkY - this.minSection];
++ }
++ return null;
++ }
++
++ public Optional<PoiSection> getSectionForVanilla(final int chunkY) {
++ if (chunkY >= this.minSection && chunkY <= this.maxSection) {
++ final PoiSection ret = this.sections[chunkY - this.minSection];
++ return ret == null ? Optional.empty() : ret.noAllocateOptional;
++ }
++ return Optional.empty();
++ }
++
++ public boolean isDirty() {
++ return this.isDirty;
++ }
++
++ public void setDirty(final boolean dirty) {
++ this.isDirty = dirty;
++ }
++
++ // returns null if empty
++ public CompoundTag save() {
++ final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess);
++
++ final CompoundTag ret = new CompoundTag();
++ final CompoundTag sections = new CompoundTag();
++ ret.put("Sections", sections);
++
++ ret.putInt("DataVersion", SharedConstants.getCurrentVersion().getWorldVersion());
++
++ final ServerLevel world = this.world;
++ final PoiManager poiManager = world.getPoiManager();
++ final int chunkX = this.chunkX;
++ final int chunkZ = this.chunkZ;
++
++ for (int sectionY = this.minSection; sectionY <= this.maxSection; ++sectionY) {
++ final PoiSection chunk = this.sections[sectionY - this.minSection];
++ if (chunk == null || chunk.isEmpty()) {
++ continue;
++ }
++
++ final long key = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
++ // codecs are honestly such a fucking disaster. What the fuck is this trash?
++ final Codec<PoiSection> codec = PoiSection.codec(() -> {
++ poiManager.setDirty(key);
++ });
++
++ final DataResult<Tag> serializedResult = codec.encodeStart(registryOps, chunk);
++ final int finalSectionY = sectionY;
++ final Tag serialized = serializedResult.resultOrPartial((final String description) -> {
++ LOGGER.error("Failed to serialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
++ }).orElse(null);
++ if (serialized == null) {
++ // failed, should be logged from the resultOrPartial
++ continue;
++ }
++
++ sections.put(Integer.toString(sectionY), serialized);
++ }
++
++ return sections.isEmpty() ? null : ret;
++ }
++
++ public static PoiChunk empty(final ServerLevel world, final int chunkX, final int chunkZ) {
++ final PoiChunk ret = new PoiChunk(world, chunkX, chunkZ, WorldUtil.getMinSection(world), WorldUtil.getMaxSection(world));
++ ret.loaded = true;
++ return ret;
++ }
++
++ public static PoiChunk parse(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data) {
++ final PoiChunk ret = empty(world, chunkX, chunkZ);
++
++ final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess);
++
++ final CompoundTag sections = data.getCompound("Sections");
++
++ if (sections.isEmpty()) {
++ // nothing to parse
++ return ret;
++ }
++
++ final PoiManager poiManager = world.getPoiManager();
++
++ boolean readAnything = false;
++
++ for (int sectionY = ret.minSection; sectionY <= ret.maxSection; ++sectionY) {
++ final String key = Integer.toString(sectionY);
++ if (!sections.contains(key)) {
++ continue;
++ }
++
++ final long coordinateKey = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
++ // codecs are honestly such a fucking disaster. What the fuck is this trash?
++ final Codec<PoiSection> codec = PoiSection.codec(() -> {
++ poiManager.setDirty(coordinateKey);
++ });
++
++ final CompoundTag section = sections.getCompound(key);
++ final DataResult<PoiSection> deserializeResult = codec.parse(registryOps, section);
++ final int finalSectionY = sectionY;
++ final PoiSection deserialized = deserializeResult.resultOrPartial((final String description) -> {
++ LOGGER.error("Failed to deserialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
++ }).orElse(null);
++
++ if (deserialized == null || deserialized.isEmpty()) {
++ // completely empty, no point in storing this
++ continue;
++ }
++
++ readAnything = true;
++ ret.sections[sectionY - ret.minSection] = deserialized;
++ }
++
++ return ret;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..b02619d7111c52d1b4e3b50267e54da31d6161e3
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+@@ -0,0 +1,125 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ImposterProtoChunk;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++
++public final class ChunkFullTask extends ChunkProgressionTask implements Runnable {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected final NewChunkHolder chunkHolder;
++ protected final ChunkAccess fromChunk;
++ protected final PrioritisedExecutor.PrioritisedTask convertToFullTask;
++
++ public ChunkFullTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final NewChunkHolder chunkHolder, final ChunkAccess fromChunk, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ this.chunkHolder = chunkHolder;
++ this.fromChunk = fromChunk;
++ this.convertToFullTask = scheduler.createChunkTask(chunkX, chunkZ, this, priority);
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.FULL;
++ }
++
++ @Override
++ public void run() {
++ // See Vanilla protoChunkToFullChunk for what this function should be doing
++ final LevelChunk chunk;
++ try {
++ if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) {
++ chunk = wrappedFull.getWrapped();
++ } else {
++ final ServerLevel world = this.world;
++ final ProtoChunk protoChunk = (ProtoChunk)this.fromChunk;
++ chunk = new LevelChunk(this.world, protoChunk, (final LevelChunk unused) -> {
++ ChunkMap.postLoadProtoChunk(world, protoChunk.getEntities());
++ });
++ }
++
++ chunk.setChunkHolder(this.scheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ)); // replaces setFullStatus
++ chunk.runPostLoad();
++ // Unlike Vanilla, we load the entity chunk here, as we load the NBT in empty status (unlike Vanilla)
++ // This brings entity addition back in line with older versions of the game
++ // Since we load the NBT in the empty status, this will never block for I/O
++ this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(this.chunkX, this.chunkZ, false);
++
++ // we don't need the entitiesInLevel trash, this system doesn't double run callbacks
++ chunk.setLoaded(true);
++ chunk.registerAllBlockEntitiesAfterLevelLoad();
++ chunk.registerTickContainerInLevel(this.world);
++ } catch (final Throwable throwable) {
++ this.complete(null, throwable);
++
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++ this.complete(chunk, null);
++ }
++
++ protected volatile boolean scheduled;
++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkFullTask.class, "scheduled", boolean.class);
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkFullTask)this, true)) {
++ throw new IllegalStateException("Cannot double call schedule()");
++ }
++ this.convertToFullTask.queue();
++ }
++
++ @Override
++ public void cancel() {
++ if (this.convertToFullTask.cancel()) {
++ this.complete(null, null);
++ }
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.convertToFullTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.convertToFullTask.raisePriority(priority);
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..a07d920d453c8687ed86d9f9449537c3eb18041e
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+@@ -0,0 +1,1190 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
++import co.aikar.timings.Timing;
++import com.google.common.collect.ImmutableList;
++import com.google.gson.JsonArray;
++import com.google.gson.JsonObject;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.misc.Delayed8WayDistancePropagator2D;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.longs.Long2IntLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2IntMap;
++import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.longs.LongArrayList;
++import it.unimi.dsi.fastutil.longs.LongIterator;
++import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.ChunkSystem;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.Ticket;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.util.SortedArraySet;
++import net.minecraft.util.Unit;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import org.bukkit.plugin.Plugin;
++import org.slf4j.Logger;
++import java.io.IOException;
++import java.text.DecimalFormat;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Collection;
++import java.util.Collections;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Objects;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicReference;
++import java.util.concurrent.locks.LockSupport;
++import java.util.concurrent.locks.ReentrantLock;
++import java.util.function.Predicate;
++
++public final class ChunkHolderManager {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public static final int FULL_LOADED_TICKET_LEVEL = 33;
++ public static final int BLOCK_TICKING_TICKET_LEVEL = 32;
++ public static final int ENTITY_TICKING_TICKET_LEVEL = 31;
++ public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive
++
++ private static final long NO_TIMEOUT_MARKER = -1L;
++
++ final ReentrantLock ticketLock = new ReentrantLock();
++
++ private final SWMRLong2ObjectHashTable<NewChunkHolder> chunkHolders = new SWMRLong2ObjectHashTable<>(16384, 0.25f);
++ private final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f);
++ // what a disaster of a name
++ // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
++ private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
++ private final ServerLevel world;
++ private final ChunkTaskScheduler taskScheduler;
++ private long currentTick;
++
++ private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
++ private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
++ if (c1 == c2) {
++ return 0;
++ }
++
++ final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
++
++ if (saveTickCompare != 0) {
++ return saveTickCompare;
++ }
++
++ final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
++ final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
++
++ if (coord1 == coord2) {
++ throw new IllegalStateException("Duplicate chunkholder in auto save queue");
++ }
++
++ return Long.compare(coord1, coord2);
++ });
++
++ public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) {
++ this.world = world;
++ this.taskScheduler = taskScheduler;
++ }
++
++ private long statusUpgradeId;
++
++ long getNextStatusUpgradeId() {
++ return ++this.statusUpgradeId;
++ }
++
++ public List<ChunkHolder> getOldChunkHolders() {
++ final List<NewChunkHolder> holders = this.getChunkHolders();
++ final List<ChunkHolder> ret = new ArrayList<>(holders.size());
++ for (final NewChunkHolder holder : holders) {
++ ret.add(holder.vanillaChunkHolder);
++ }
++ return ret;
++ }
++
++ public List<NewChunkHolder> getChunkHolders() {
++ final List<NewChunkHolder> ret = new ArrayList<>(this.chunkHolders.size());
++ this.chunkHolders.forEachValue(ret::add);
++ return ret;
++ }
++
++ public int size() {
++ return this.chunkHolders.size();
++ }
++
++ public void close(final boolean save, final boolean halt) {
++ TickThread.ensureTickThread("Closing world off-main");
++ if (halt) {
++ LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'");
++ if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) {
++ LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'");
++ } else {
++ LOGGER.info("Halted chunk system for world '" + this.world.getWorld().getName() + "'");
++ }
++ }
++
++ if (save) {
++ this.saveAllChunks(true, true, true);
++ }
++
++ if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) {
++ RegionFileIOThread.flush();
++ }
++
++ // kill regionfile cache
++ try {
++ this.world.chunkDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close chunk regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ try {
++ this.world.entityDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close entity regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ try {
++ this.world.poiDataControllerNew.getCache().close();
++ } catch (final IOException ex) {
++ LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
++ }
++ }
++
++ void ensureInAutosave(final NewChunkHolder holder) {
++ if (!this.autoSaveQueue.contains(holder)) {
++ holder.lastAutoSave = MinecraftServer.currentTick;
++ this.autoSaveQueue.add(holder);
++ }
++ }
++
++ public void autoSave() {
++ final List<NewChunkHolder> reschedule = new ArrayList<>();
++ final long currentTick = MinecraftServer.currentTickLong;
++ final long maxSaveTime = currentTick - this.world.paperConfig().chunks.autoSaveInterval.value();
++ for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !this.autoSaveQueue.isEmpty();) {
++ final NewChunkHolder holder = this.autoSaveQueue.first();
++
++ if (holder.lastAutoSave > maxSaveTime) {
++ break;
++ }
++
++ this.autoSaveQueue.remove(holder);
++
++ holder.lastAutoSave = currentTick;
++ if (holder.save(false, false)) {
++ ++autoSaved;
++ }
++
++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ reschedule.add(holder);
++ }
++ }
++
++ for (final NewChunkHolder holder : reschedule) {
++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ this.autoSaveQueue.add(holder);
++ }
++ }
++ }
++
++ public void saveAllChunks(final boolean flush, final boolean shutdown, final boolean logProgress) {
++ final List<NewChunkHolder> holders = this.getChunkHolders();
++
++ if (logProgress) {
++ LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'");
++ }
++
++ final DecimalFormat format = new DecimalFormat("#0.00");
++
++ int saved = 0;
++
++ long start = System.nanoTime();
++ long lastLog = start;
++ boolean needsFlush = false;
++ final int flushInterval = 50;
++
++ for (int i = 0, len = holders.size(); i < len; ++i) {
++ final NewChunkHolder holder = holders.get(i);
++ try {
++ if (holder.save(shutdown, false)) {
++ ++saved;
++ needsFlush = flush;
++ }
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save chunk (" + holder.chunkX + "," + holder.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
++ }
++ if (needsFlush && (saved % flushInterval) == 0) {
++ needsFlush = false;
++ RegionFileIOThread.partialFlush(flushInterval / 2);
++ }
++ if (logProgress) {
++ final long currTime = System.nanoTime();
++ if ((currTime - lastLog) > TimeUnit.SECONDS.toNanos(10L)) {
++ lastLog = currTime;
++ LOGGER.info("Saved " + saved + " chunks (" + format.format((double)(i+1)/(double)len * 100.0) + "%) in world '" + this.world.getWorld().getName() + "'");
++ }
++ }
++ }
++ if (flush) {
++ RegionFileIOThread.flush();
++ }
++ if (logProgress) {
++ LOGGER.info("Saved " + saved + " chunks in world '" + this.world.getWorld().getName() + "' in " + TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) + "s");
++ }
++ }
++
++ protected final Long2IntLinkedOpenHashMap ticketLevelUpdates = new Long2IntLinkedOpenHashMap() {
++ @Override
++ protected void rehash(final int newN) {
++ // no downsizing allowed
++ if (newN < this.n) {
++ return;
++ }
++ super.rehash(newN);
++ }
++ };
++
++ protected final Delayed8WayDistancePropagator2D ticketLevelPropagator = new Delayed8WayDistancePropagator2D(
++ (final long coordinate, final byte oldLevel, final byte newLevel) -> {
++ ChunkHolderManager.this.ticketLevelUpdates.putAndMoveToLast(coordinate, convertBetweenTicketLevels(newLevel));
++ }
++ );
++ // function for converting between ticket levels and propagator levels and vice versa
++ // the problem is the ticket level propagator will propagate from a set source down to zero, whereas mojang expects
++ // levels to propagate from a set value up to a maximum value. so we need to convert the levels we put into the propagator
++ // and the levels we get out of the propagator
++
++ public static int convertBetweenTicketLevels(final int level) {
++ return ChunkMap.MAX_CHUNK_DISTANCE - level + 1;
++ }
++
++ public boolean hasTickets() {
++ this.ticketLock.lock();
++ try {
++ return !this.tickets.isEmpty();
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public String getTicketDebugString(final long coordinate) {
++ this.ticketLock.lock();
++ try {
++ final SortedArraySet> tickets = this.tickets.get(coordinate);
++
++ return tickets != null ? tickets.first().toString() : "no_ticket";
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public Long2ObjectOpenHashMap>> getTicketsCopy() {
++ this.ticketLock.lock();
++ try {
++ return this.tickets.clone();
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public Collection getPluginChunkTickets(int x, int z) {
++ ImmutableList.Builder ret;
++ this.ticketLock.lock();
++ try {
++ SortedArraySet> tickets = this.tickets.get(ChunkPos.asLong(x, z));
++
++ if (tickets == null) {
++ return Collections.emptyList();
++ }
++
++ ret = ImmutableList.builder();
++ for (Ticket> ticket : tickets) {
++ if (ticket.getType() == TicketType.PLUGIN_TICKET) {
++ ret.add((Plugin)ticket.key);
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ return ret.build();
++ }
++
++ protected final int getPropagatedTicketLevel(final long coordinate) {
++ return convertBetweenTicketLevels(this.ticketLevelPropagator.getLevel(coordinate));
++ }
++
++ protected final void updateTicketLevel(final long coordinate, final int ticketLevel) {
++ if (ticketLevel > ChunkMap.MAX_CHUNK_DISTANCE) {
++ this.ticketLevelPropagator.removeSource(coordinate);
++ } else {
++ this.ticketLevelPropagator.setSource(coordinate, convertBetweenTicketLevels(ticketLevel));
++ }
++ }
++
++ private static int getTicketLevelAt(SortedArraySet> tickets) {
++ return !tickets.isEmpty() ? tickets.first().getTicketLevel() : MAX_TICKET_LEVEL + 1;
++ }
++
++ public boolean addTicketAtLevel(final TicketType type, final ChunkPos chunkPos, final int level,
++ final T identifier) {
++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
++ }
++
++ public boolean addTicketAtLevel(final TicketType type, final int chunkX, final int chunkZ, final int level,
++ final T identifier) {
++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
++ }
++
++ // supposed to return true if the ticket was added and did not replace another
++ // but, we always return false if the ticket cannot be added
++ public boolean addTicketAtLevel(final TicketType type, final long chunk, final int level, final T identifier) {
++ final long removeDelay = Math.max(0, type.timeout);
++ if (level > MAX_TICKET_LEVEL) {
++ return false;
++ }
++
++ this.ticketLock.lock();
++ try {
++ final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : this.currentTick + removeDelay;
++ final Ticket ticket = new Ticket<>(type, level, identifier, removeTick);
++
++ final SortedArraySet> ticketsAtChunk = this.tickets.computeIfAbsent(chunk, (final long keyInMap) -> {
++ return SortedArraySet.create(4);
++ });
++
++ final int levelBefore = getTicketLevelAt(ticketsAtChunk);
++ final Ticket current = (Ticket)ticketsAtChunk.replace(ticket);
++ final int levelAfter = getTicketLevelAt(ticketsAtChunk);
++
++ if (current != ticket) {
++ final long oldRemovalTick = current.removalTick;
++ if (removeTick != oldRemovalTick) {
++ if (oldRemovalTick != NO_TIMEOUT_MARKER) {
++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(oldRemovalTick);
++ final int prevCount = removeCounts.addTo(chunk, -1);
++
++ if (prevCount == 1) {
++ removeCounts.remove(chunk);
++ if (removeCounts.isEmpty()) {
++ this.removeTickToChunkExpireTicketCount.remove(oldRemovalTick);
++ }
++ }
++ }
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).addTo(chunk, 1);
++ }
++ }
++ } else {
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).addTo(chunk, 1);
++ }
++ }
++
++ if (levelBefore != levelAfter) {
++ this.updateTicketLevel(chunk, levelAfter);
++ }
++
++ return current == ticket;
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public boolean removeTicketAtLevel(final TicketType type, final ChunkPos chunkPos, final int level, final T identifier) {
++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
++ }
++
++ public boolean removeTicketAtLevel(final TicketType type, final int chunkX, final int chunkZ, final int level, final T identifier) {
++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
++ }
++
++ public boolean removeTicketAtLevel(final TicketType type, final long chunk, final int level, final T identifier) {
++ if (level > MAX_TICKET_LEVEL) {
++ return false;
++ }
++
++ this.ticketLock.lock();
++ try {
++ final SortedArraySet> ticketsAtChunk = this.tickets.get(chunk);
++ if (ticketsAtChunk == null) {
++ return false;
++ }
++
++ final int oldLevel = getTicketLevelAt(ticketsAtChunk);
++ final Ticket ticket = (Ticket)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, -2L));
++
++ if (ticket == null) {
++ return false;
++ }
++
++ if (ticketsAtChunk.isEmpty()) {
++ this.tickets.remove(chunk);
++ }
++
++ final int newLevel = getTicketLevelAt(ticketsAtChunk);
++
++ final long removeTick = ticket.removalTick;
++ if (removeTick != NO_TIMEOUT_MARKER) {
++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(removeTick);
++ final int currCount = removeCounts.addTo(chunk, -1);
++
++ if (currCount == 1) {
++ removeCounts.remove(chunk);
++ if (removeCounts.isEmpty()) {
++ this.removeTickToChunkExpireTicketCount.remove(removeTick);
++ }
++ }
++ }
++
++ if (oldLevel != newLevel) {
++ this.updateTicketLevel(chunk, newLevel);
++ }
++
++ return true;
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ // atomic with respect to all add/remove/addandremove ticket calls for the given chunk
++ public void addAndRemoveTickets(final long chunk, final TicketType addType, final int addLevel, final T addIdentifier,
++ final TicketType removeType, final int removeLevel, final V removeIdentifier) {
++ this.ticketLock.lock();
++ try {
++ this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier);
++ this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier);
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public void removeAllTicketsFor(final TicketType ticketType, final int ticketLevel, final T ticketIdentifier) {
++ if (ticketLevel > MAX_TICKET_LEVEL) {
++ return;
++ }
++
++ this.ticketLock.lock();
++ try {
++ for (final LongIterator iterator = new LongArrayList(this.tickets.keySet()).longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
++
++ this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier);
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ public void tick() {
++ TickThread.ensureTickThread("Cannot tick ticket manager off-main");
++
++ this.ticketLock.lock();
++ try {
++ final long tick = ++this.currentTick;
++
++ final Long2IntOpenHashMap toRemove = this.removeTickToChunkExpireTicketCount.remove(tick);
++
++ if (toRemove == null) {
++ return;
++ }
++
++ final Predicate> expireNow = (final Ticket> ticket) -> {
++ return ticket.removalTick == tick;
++ };
++
++ for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
++
++ final SortedArraySet> tickets = this.tickets.get(chunk);
++ tickets.removeIf(expireNow);
++ if (tickets.isEmpty()) {
++ this.tickets.remove(chunk);
++ this.ticketLevelPropagator.removeSource(chunk);
++ } else {
++ this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel()));
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ this.processTicketUpdates();
++ }
++
++ public NewChunkHolder getChunkHolder(final int chunkX, final int chunkZ) {
++ return this.chunkHolders.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ public NewChunkHolder getChunkHolder(final long position) {
++ return this.chunkHolders.get(position);
++ }
++
++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.raisePriority(priority);
++ }
++ }
++
++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.setPriority(priority);
++ }
++ }
++
++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
++ if (chunkHolder != null) {
++ chunkHolder.lowerPriority(priority);
++ }
++ }
++
++ private NewChunkHolder createChunkHolder(final long position) {
++ final NewChunkHolder ret = new NewChunkHolder(this.world, CoordinateUtils.getChunkX(position), CoordinateUtils.getChunkZ(position), this.taskScheduler);
++
++ ChunkSystem.onChunkHolderCreate(this.world, ret.vanillaChunkHolder);
++ ret.vanillaChunkHolder.onChunkAdd();
++
++ return ret;
++ }
++
++ // because this function creates the chunk holder without a ticket, it is the caller's responsibility to ensure
++ // the chunk holder eventually unloads. this should only be used to avoid using processTicketUpdates to create chunkholders,
++ // as processTicketUpdates may call plugin logic; in every other case a ticket is appropriate
++ private NewChunkHolder getOrCreateChunkHolder(final int chunkX, final int chunkZ) {
++ return this.getOrCreateChunkHolder(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
++ private NewChunkHolder getOrCreateChunkHolder(final long position) {
++ if (!this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold ticket level update lock!");
++ }
++ if (!this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold scheduler lock!!");
++ }
++
++ // we could just acquire these locks, but...
++ // must own the locks because the caller needs to ensure that no unload can occur AFTER this function returns
++
++ NewChunkHolder current = this.chunkHolders.get(position);
++ if (current != null) {
++ return current;
++ }
++
++ current = this.createChunkHolder(position);
++ this.chunkHolders.put(position, current);
++
++ return current;
++ }
++
++ private long entityLoadCounter;
++
++ public ChunkEntitySlices getOrCreateEntityChunk(final int chunkX, final int chunkZ, final boolean transientChunk) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create entity chunk off-main");
++ ChunkEntitySlices ret;
++
++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
++ if (current != null && (ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
++ return ret;
++ }
++
++ final AtomicBoolean isCompleted = new AtomicBoolean();
++ final Thread waiter = Thread.currentThread();
++ final Long entityLoadId;
++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
++ this.ticketLock.lock();
++ try {
++ entityLoadId = Long.valueOf(this.entityLoadCounter++);
++ this.addTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ current = this.getOrCreateChunkHolder(chunkX, chunkZ);
++ if ((ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
++ this.removeTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
++ return ret;
++ }
++
++ if (current.isEntityChunkNBTLoaded()) {
++ isCompleted.setPlain(true);
++ } else {
++ loadTask = current.getOrLoadEntityData((final GenericDataLoadTask.TaskResult result) -> {
++ if (!transientChunk) {
++ isCompleted.set(true);
++ LockSupport.unpark(waiter);
++ }
++ });
++ final ChunkLoadTask.EntityDataLoadTask entityLoad = current.getEntityDataLoadTask();
++
++ if (entityLoad != null && !transientChunk) {
++ entityLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
++ }
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (loadTask != null) {
++ loadTask.schedule();
++ }
++
++ if (!transientChunk) {
++ // Note: no need to busy wait on the chunk queue, entity load will complete off-main
++ boolean interrupted = false;
++ while (!isCompleted.get()) {
++ interrupted |= Thread.interrupted();
++ LockSupport.park();
++ }
++
++ if (interrupted) {
++ Thread.currentThread().interrupt();
++ }
++ }
++
++ // now that the entity data is loaded, we can load it into the world
++
++ ret = current.loadInEntityChunk(transientChunk);
++
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ this.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
++ TicketType.ENTITY_LOAD, MAX_TICKET_LEVEL, entityLoadId
++ );
++
++ return ret;
++ }
++
++ public PoiChunk getPoiChunkIfLoaded(final int chunkX, final int chunkZ, final boolean checkLoadInCallback) {
++ final NewChunkHolder holder = this.getChunkHolder(chunkX, chunkZ);
++ if (holder != null) {
++ final PoiChunk ret = holder.getPoiChunk();
++ return ret == null || (checkLoadInCallback && !ret.isLoaded()) ? null : ret;
++ }
++ return null;
++ }
++
++ private long poiLoadCounter;
++
++ public PoiChunk loadPoiChunk(final int chunkX, final int chunkZ) {
++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create poi chunk off-main");
++ PoiChunk ret;
++
++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
++ if (current != null && (ret = current.getPoiChunk()) != null) {
++ if (!ret.isLoaded()) {
++ ret.load();
++ }
++ return ret;
++ }
++
++ final AtomicReference completed = new AtomicReference<>();
++ final AtomicBoolean isCompleted = new AtomicBoolean();
++ final Thread waiter = Thread.currentThread();
++ final Long poiLoadId;
++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
++ this.ticketLock.lock();
++ try {
++ poiLoadId = Long.valueOf(this.poiLoadCounter++);
++ this.addTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ current = this.getOrCreateChunkHolder(chunkX, chunkZ);
++ if (current.isPoiChunkLoaded()) {
++ this.removeTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
++ return current.getPoiChunk();
++ }
++
++ loadTask = current.getOrLoadPoiData((final GenericDataLoadTask.TaskResult result) -> {
++ completed.setPlain(result.left());
++ isCompleted.set(true);
++ LockSupport.unpark(waiter);
++ });
++ final ChunkLoadTask.PoiDataLoadTask poiLoad = current.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (loadTask != null) {
++ loadTask.schedule();
++ }
++
++ // Note: no need to busy wait on the chunk queue, poi load will complete off-main
++
++ boolean interrupted = false;
++ while (!isCompleted.get()) {
++ interrupted |= Thread.interrupted();
++ LockSupport.park();
++ }
++
++ if (interrupted) {
++ Thread.currentThread().interrupt();
++ }
++
++ ret = completed.getPlain();
++
++ ret.load();
++
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ this.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
++ TicketType.POI_LOAD, MAX_TICKET_LEVEL, poiLoadId
++ );
++
++ return ret;
++ }
++
++ void addChangedStatuses(final List changedFullStatus) {
++ if (changedFullStatus.isEmpty()) {
++ return;
++ }
++ if (!TickThread.isTickThread()) {
++ this.taskScheduler.scheduleChunkTask(() -> {
++ final ArrayDeque pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate;
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++
++ ChunkHolderManager.this.processPendingFullUpdate();
++ }, PrioritisedExecutor.Priority.HIGHEST);
++ } else {
++ final ArrayDeque pendingFullLoadUpdate = this.pendingFullLoadUpdate;
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++ }
++ }
++
++ final ReferenceLinkedOpenHashSet unloadQueue = new ReferenceLinkedOpenHashSet<>();
++
++ private void removeChunkHolder(final NewChunkHolder holder) {
++ holder.killed = true;
++ holder.vanillaChunkHolder.onChunkRemove();
++ this.autoSaveQueue.remove(holder);
++ ChunkSystem.onChunkHolderDelete(this.world, holder.vanillaChunkHolder);
++ this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ));
++ }
++
++ // note: never call while inside the chunk system, this will absolutely break everything
++ public void processUnloads() {
++ TickThread.ensureTickThread("Cannot unload chunks off-main");
++
++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
++ throw new IllegalStateException("Cannot unload chunks recursively");
++ }
++ if (this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot hold ticket update lock while calling processUnloads");
++ }
++ if (this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads");
++ }
++
++ final List unloadQueue;
++ final List scheduleList = new ArrayList<>();
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ if (this.unloadQueue.isEmpty()) {
++ return;
++ }
++ // in order to ensure all chunks in the unload queue do not have a pending ticket level update,
++ // process them now
++ this.processTicketUpdates(false, false, scheduleList);
++ unloadQueue = new ArrayList<>((int)(this.unloadQueue.size() * 0.05) + 1);
++
++ final int unloadCount = Math.max(50, (int)(this.unloadQueue.size() * 0.05));
++ for (int i = 0; i < unloadCount && !this.unloadQueue.isEmpty(); ++i) {
++ final NewChunkHolder chunkHolder = this.unloadQueue.removeFirst();
++ if (chunkHolder.isSafeToUnload() != null) {
++ LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?");
++ continue;
++ }
++ final NewChunkHolder.UnloadState state = chunkHolder.unloadStage1();
++ if (state == null) {
++ // can unload immediately
++ this.removeChunkHolder(chunkHolder);
++ continue;
++ }
++ unloadQueue.add(state);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
++ for (int i = 0, len = scheduleList.size(); i < len; ++i) {
++ scheduleList.get(i).schedule();
++ }
++
++ final List toRemove = new ArrayList<>(unloadQueue.size());
++
++ final Boolean before = this.blockTicketUpdates();
++ try {
++ for (int i = 0, len = unloadQueue.size(); i < len; ++i) {
++ final NewChunkHolder.UnloadState state = unloadQueue.get(i);
++ final NewChunkHolder holder = state.holder();
++
++ holder.unloadStage2(state);
++ toRemove.add(holder);
++ }
++ } finally {
++ this.unblockTicketUpdates(before);
++ }
++
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ for (int i = 0, len = toRemove.size(); i < len; ++i) {
++ final NewChunkHolder holder = toRemove.get(i);
++
++ if (holder.unloadStage3()) {
++ this.removeChunkHolder(holder);
++ } else {
++ // add cooldown so the next unload check is not immediately next tick
++ this.addTicketAtLevel(TicketType.UNLOAD_COOLDOWN, holder.chunkX, holder.chunkZ, MAX_TICKET_LEVEL, Unit.INSTANCE);
++ }
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
++ private final ThreadLocal BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> {
++ return Boolean.FALSE;
++ });
++
++ public Boolean blockTicketUpdates() {
++ final Boolean ret = BLOCK_TICKET_UPDATES.get();
++ BLOCK_TICKET_UPDATES.set(Boolean.TRUE);
++ return ret;
++ }
++
++ public void unblockTicketUpdates(final Boolean before) {
++ BLOCK_TICKET_UPDATES.set(before);
++ }
++
++ public boolean processTicketUpdates() {
++ return this.processTicketUpdates(true, true, null);
++ }
++
++ private static final ThreadLocal> CURRENT_TICKET_UPDATE_SCHEDULING = new ThreadLocal<>();
++
++ static List getCurrentTicketUpdateScheduling() {
++ return CURRENT_TICKET_UPDATE_SCHEDULING.get();
++ }
++
++ private boolean processTicketUpdates(final boolean checkLocks, final boolean processFullUpdates, List scheduledTasks) {
++ TickThread.ensureTickThread("Cannot process ticket levels off-main");
++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
++ throw new IllegalStateException("Cannot update ticket level while unloading chunks or updating entity manager");
++ }
++ if (checkLocks && this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Illegal recursive processTicketUpdates!");
++ }
++ if (checkLocks && this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot update ticket levels from a scheduler context!");
++ }
++
++ List changedFullStatus = null;
++
++ final boolean isTickThread = TickThread.isTickThread();
++
++ boolean ret = false;
++ final boolean canProcessFullUpdates = processFullUpdates & isTickThread;
++ final boolean canProcessScheduling = scheduledTasks == null;
++
++ this.ticketLock.lock();
++ try {
++ final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates();
++ if (levelsUpdated) {
++ // Unlike CB, ticket level updates cannot happen recursively. Thank god.
++ if (!this.ticketLevelUpdates.isEmpty()) {
++ ret = true;
++
++ // first the necessary chunkholders must be created, so just update the ticket levels
++ for (final Iterator iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
++ final Long2IntMap.Entry entry = iterator.next();
++ final long key = entry.getLongKey();
++ final int newLevel = entry.getIntValue();
++
++ NewChunkHolder current = this.chunkHolders.get(key);
++ if (current == null && newLevel > MAX_TICKET_LEVEL) {
++ // not loaded and it shouldn't be loaded!
++ iterator.remove();
++ continue;
++ }
++
++ final int currentLevel = current == null ? MAX_TICKET_LEVEL + 1 : current.getCurrentTicketLevel();
++ if (currentLevel == newLevel) {
++ // nothing to do
++ iterator.remove();
++ continue;
++ }
++
++ if (current == null) {
++ // must create
++ current = this.createChunkHolder(key);
++ this.chunkHolders.put(key, current);
++ current.updateTicketLevel(newLevel);
++ } else {
++ current.updateTicketLevel(newLevel);
++ }
++ }
++
++ if (scheduledTasks == null) {
++ scheduledTasks = new ArrayList<>();
++ }
++ changedFullStatus = new ArrayList<>();
++
++ // allow the chunkholders to process ticket level updates without needing to acquire the schedule lock every time
++ final List prev = CURRENT_TICKET_UPDATE_SCHEDULING.get();
++ CURRENT_TICKET_UPDATE_SCHEDULING.set(scheduledTasks);
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ for (final Iterator iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
++ final Long2IntMap.Entry entry = iterator.next();
++ final long key = entry.getLongKey();
++ final NewChunkHolder current = this.chunkHolders.get(key);
++
++ if (current == null) {
++ throw new IllegalStateException("Expected chunk holder to be created");
++ }
++
++ current.processTicketLevelUpdate(scheduledTasks, changedFullStatus);
++ }
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ CURRENT_TICKET_UPDATE_SCHEDULING.set(prev);
++ }
++
++ this.ticketLevelUpdates.clear();
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++
++ if (changedFullStatus != null) {
++ this.addChangedStatuses(changedFullStatus);
++ }
++
++ if (canProcessScheduling && scheduledTasks != null) {
++ for (int i = 0, len = scheduledTasks.size(); i < len; ++i) {
++ scheduledTasks.get(i).schedule();
++ }
++ }
++
++ if (canProcessFullUpdates) {
++ ret |= this.processPendingFullUpdate();
++ }
++
++ return ret;
++ }
++
++ // only call on tick thread
++ protected final boolean processPendingFullUpdate() {
++ final ArrayDeque pendingFullLoadUpdate = this.pendingFullLoadUpdate;
++
++ boolean ret = false;
++
++ List changedFullStatus = new ArrayList<>();
++
++ NewChunkHolder holder;
++ while ((holder = pendingFullLoadUpdate.poll()) != null) {
++ ret |= holder.handleFullStatusChange(changedFullStatus);
++
++ if (!changedFullStatus.isEmpty()) {
++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
++ pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ }
++ changedFullStatus.clear();
++ }
++ }
++
++ return ret;
++ }
++
++ public JsonObject getDebugJsonForWatchdog() {
++ // try and detect any potential deadlock that would require us to read unlocked
++ try {
++ if (this.ticketLock.tryLock(10, TimeUnit.SECONDS)) {
++ try {
++ if (this.taskScheduler.schedulingLock.tryLock(10, TimeUnit.SECONDS)) {
++ try {
++ return this.getDebugJsonNoLock();
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++ } catch (final InterruptedException ignore) {}
++
++ LOGGER.error("Failed to acquire ticket and scheduling lock before timeout for world " + this.world.getWorld().getName());
++
++ // because we read without locks, it may throw exceptions for fastutil maps
++ // so just try until it works...
++ Throwable lastException = null;
++ for (int count = 0;count < 1000;++count) {
++ try {
++ return this.getDebugJsonNoLock();
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ lastException = thr;
++ Thread.yield();
++ LockSupport.parkNanos(10_000L);
++ }
++ }
++
++ // failed, return
++ LOGGER.error("Failed to retrieve debug json for watchdog thread without locking", lastException);
++ return null;
++ }
++
++ private JsonObject getDebugJsonNoLock() {
++ final JsonObject ret = new JsonObject();
++ ret.addProperty("current_tick", Long.valueOf(this.currentTick));
++
++ final JsonArray unloadQueue = new JsonArray();
++ ret.add("unload_queue", unloadQueue);
++ for (final NewChunkHolder holder : this.unloadQueue) {
++ final JsonObject coordinate = new JsonObject();
++ unloadQueue.add(coordinate);
++
++ coordinate.addProperty("chunkX", Integer.valueOf(holder.chunkX));
++ coordinate.addProperty("chunkZ", Integer.valueOf(holder.chunkZ));
++ }
++
++ final JsonArray holders = new JsonArray();
++ ret.add("chunkholders", holders);
++
++ for (final NewChunkHolder holder : this.getChunkHolders()) {
++ holders.add(holder.getDebugJson());
++ }
++
++ final JsonArray removeTickToChunkExpireTicketCount = new JsonArray();
++ ret.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount);
++
++ for (final Long2ObjectMap.Entry tickEntry : this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) {
++ final long tick = tickEntry.getLongKey();
++ final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue();
++
++ final JsonObject tickJson = new JsonObject();
++ removeTickToChunkExpireTicketCount.add(tickJson);
++
++ tickJson.addProperty("tick", Long.valueOf(tick));
++
++ final JsonArray tickEntries = new JsonArray();
++ tickJson.add("entries", tickEntries);
++
++ for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) {
++ final long coordinate = entry.getLongKey();
++ final int count = entry.getIntValue();
++
++ final JsonObject entryJson = new JsonObject();
++ tickEntries.add(entryJson);
++
++ entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++ entryJson.addProperty("count", Integer.valueOf(count));
++ }
++ }
++
++ final JsonArray allTicketsJson = new JsonArray();
++ ret.add("tickets", allTicketsJson);
++
++ for (final Long2ObjectMap.Entry>> coordinateTickets : this.tickets.long2ObjectEntrySet()) {
++ final long coordinate = coordinateTickets.getLongKey();
++ final SortedArraySet> tickets = coordinateTickets.getValue();
++
++ final JsonObject coordinateJson = new JsonObject();
++ allTicketsJson.add(coordinateJson);
++
++ coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++
++ final JsonArray ticketsSerialized = new JsonArray();
++ coordinateJson.add("tickets", ticketsSerialized);
++
++ for (final Ticket> ticket : tickets) {
++ final JsonObject ticketSerialized = new JsonObject();
++ ticketsSerialized.add(ticketSerialized);
++
++ ticketSerialized.addProperty("type", ticket.getType().toString());
++ ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel()));
++ ticketSerialized.addProperty("identifier", Objects.toString(ticket.key));
++ ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick));
++ }
++ }
++
++ return ret;
++ }
++
++ public JsonObject getDebugJson() {
++ final List scheduleList = new ArrayList<>();
++ try {
++ final JsonObject ret;
++ this.ticketLock.lock();
++ try {
++ this.taskScheduler.schedulingLock.lock();
++ try {
++ this.processTicketUpdates(false, false, scheduleList);
++ ret = this.getDebugJsonNoLock();
++ } finally {
++ this.taskScheduler.schedulingLock.unlock();
++ }
++ } finally {
++ this.ticketLock.unlock();
++ }
++ return ret;
++ } finally {
++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
++ for (int i = 0, len = scheduleList.size(); i < len; ++i) {
++ scheduleList.get(i).schedule();
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..53ddd7e9ac05e6a9eb809f329796e6d4f6bb2ab1
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
+@@ -0,0 +1,181 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.starlight.common.light.StarLightEngine;
++import ca.spottedleaf.starlight.common.light.StarLightInterface;
++import io.papermc.paper.chunk.system.light.LightQueue;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.apache.logging.log4j.LogManager;
++import org.apache.logging.log4j.Logger;
++import java.util.function.BooleanSupplier;
++
++public final class ChunkLightTask extends ChunkProgressionTask {
++
++ private static final Logger LOGGER = LogManager.getLogger();
++
++ protected final ChunkAccess fromChunk;
++
++ private final LightTaskPriorityHolder priorityHolder;
++
++ public ChunkLightTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final ChunkAccess chunk, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.priorityHolder = new LightTaskPriorityHolder(priority, this);
++ this.fromChunk = chunk;
++ }
++
++ @Override
++ public boolean isScheduled() {
++ return this.priorityHolder.isScheduled();
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.LIGHT;
++ }
++
++ @Override
++ public void schedule() {
++ this.priorityHolder.schedule();
++ }
++
++ @Override
++ public void cancel() {
++ this.priorityHolder.cancel();
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.priorityHolder.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.raisePriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ this.priorityHolder.raisePriority(priority);
++ }
++
++ private static final class LightTaskPriorityHolder extends PriorityHolder {
++
++ protected final ChunkLightTask task;
++
++ protected LightTaskPriorityHolder(final PrioritisedExecutor.Priority priority, final ChunkLightTask task) {
++ super(priority);
++ this.task = task;
++ }
++
++ @Override
++ protected void cancelScheduled() {
++ final ChunkLightTask task = this.task;
++ task.complete(null, null);
++ }
++
++ @Override
++ protected PrioritisedExecutor.Priority getScheduledPriority() {
++ final ChunkLightTask task = this.task;
++ return task.world.getChunkSource().getLightEngine().theLightEngine.lightQueue.getPriority(task.chunkX, task.chunkZ);
++ }
++
++ @Override
++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.queueChunkLightTask(new ChunkPos(task.chunkX, task.chunkZ), new LightTask(starLightInterface, task), priority);
++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.lowerPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
++ }
++
++ @Override
++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final ChunkLightTask task = this.task;
++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
++ final LightQueue lightQueue = starLightInterface.lightQueue;
++ lightQueue.raisePriority(task.chunkX, task.chunkZ, priority);
++ }
++ }
++
++ private static final class LightTask implements BooleanSupplier {
++
++ protected final StarLightInterface lightEngine;
++ protected final ChunkLightTask task;
++
++ public LightTask(final StarLightInterface lightEngine, final ChunkLightTask task) {
++ this.lightEngine = lightEngine;
++ this.task = task;
++ }
++
++ @Override
++ public boolean getAsBoolean() {
++ final ChunkLightTask task = this.task;
++ // executed on light thread
++ if (!task.priorityHolder.markExecuting()) {
++ // cancelled
++ return false;
++ }
++
++ try {
++ final Boolean[] emptySections = StarLightEngine.getEmptySectionsForChunk(task.fromChunk);
++
++ if (task.fromChunk.isLightCorrect() && task.fromChunk.getStatus().isOrAfter(ChunkStatus.LIGHT)) {
++ this.lightEngine.forceLoadInChunk(task.fromChunk, emptySections);
++ this.lightEngine.checkChunkEdges(task.chunkX, task.chunkZ);
++ } else {
++ task.fromChunk.setLightCorrect(false);
++ this.lightEngine.lightChunk(task.fromChunk, emptySections);
++ task.fromChunk.setLightCorrect(true);
++ }
++ // we need to advance status
++ if (task.fromChunk instanceof ProtoChunk chunk && chunk.getStatus() == ChunkStatus.LIGHT.getParent()) {
++ chunk.setStatus(ChunkStatus.LIGHT);
++ }
++ } catch (final Throwable thr) {
++ if (!(thr instanceof ThreadDeath)) {
++ LOGGER.fatal("Failed to light chunk " + task.fromChunk.getPos().toString() + " in world '" + this.lightEngine.getWorld().getWorld().getName() + "'", thr);
++ }
++
++ task.complete(null, thr);
++
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++
++ return true;
++ }
++
++ task.complete(task.fromChunk, null);
++ return true;
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..1e2b8e457aabba2a5d1fabfba22be2faa1d3f45d
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+@@ -0,0 +1,499 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.dataconverter.minecraft.MCDataConverter;
++import ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import net.minecraft.SharedConstants;
++import net.minecraft.core.Registry;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import net.minecraft.world.level.chunk.UpgradeData;
++import net.minecraft.world.level.chunk.storage.ChunkSerializer;
++import net.minecraft.world.level.chunk.storage.EntityStorage;
++import net.minecraft.world.level.levelgen.blending.BlendingData;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.function.Consumer;
++
++public final class ChunkLoadTask extends ChunkProgressionTask {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ private final NewChunkHolder chunkHolder;
++ private final ChunkDataLoadTask loadTask;
++
++ private boolean cancelled;
++ private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
++ private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
++
++ protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
++ final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ this.chunkHolder = chunkHolder;
++ this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority);
++ this.loadTask.addCallback((final GenericDataLoadTask.TaskResult result) -> {
++ ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
++ });
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return ChunkStatus.EMPTY;
++ }
++
++ private boolean scheduled;
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
++ final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
++
++ final AtomicInteger count = new AtomicInteger();
++ final Consumer> scheduleLoadTask = (final GenericDataLoadTask.TaskResult, ?> result) -> {
++ if (count.decrementAndGet() == 0) {
++ ChunkLoadTask.this.loadTask.schedule(false);
++ }
++ };
++
++ // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because
++ // they must schedule a task to off main or to on main to complete
++ this.scheduler.schedulingLock.lock();
++ try {
++ if (this.scheduled) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++ this.scheduled = true;
++ if (this.cancelled) {
++ return;
++ }
++ if (!this.chunkHolder.isEntityChunkNBTLoaded()) {
++ entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask);
++ count.setPlain(count.getPlain() + 1);
++ } else {
++ entityLoadTask = null;
++ }
++
++ if (!this.chunkHolder.isPoiChunkLoaded()) {
++ poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask);
++ count.setPlain(count.getPlain() + 1);
++ } else {
++ poiLoadTask = null;
++ }
++
++ this.entityLoadTask = entityLoadTask;
++ this.poiLoadTask = poiLoadTask;
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (entityLoadTask != null) {
++ entityLoadTask.schedule();
++ }
++
++ if (poiLoadTask != null) {
++ poiLoadTask.schedule();
++ }
++
++ if (entityLoadTask == null && poiLoadTask == null) {
++ // no need to wait on those, we can schedule now
++ this.loadTask.schedule(false);
++ }
++ }
++
++ @Override
++ public void cancel() {
++ // must be before load task access, so we can synchronise with the writes to the fields
++ this.scheduler.schedulingLock.lock();
++ try {
++ this.cancelled = true;
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ /*
++ Note: The entityLoadTask/poiLoadTask do not complete when cancelled,
++ but this is fine because if they are successfully cancelled then
++ we will successfully cancel the load task, which will complete when cancelled
++ */
++
++ if (this.entityLoadTask != null) {
++ this.entityLoadTask.cancel();
++ }
++ if (this.poiLoadTask != null) {
++ this.poiLoadTask.cancel();
++ }
++ this.loadTask.cancel();
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.loadTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.lowerPriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.lowerPriority(priority);
++ }
++
++ this.loadTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.setPriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.setPriority(priority);
++ }
++
++ this.loadTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
++ if (entityLoad != null) {
++ entityLoad.raisePriority(priority);
++ }
++
++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
++
++ if (poiLoad != null) {
++ poiLoad.raisePriority(priority);
++ }
++
++ this.loadTask.raisePriority(priority);
++ }
++
++ protected static abstract class CallbackDataLoadTask extends GenericDataLoadTask {
++
++ private TaskResult result;
++ private final MultiThreadedQueue>> waiters = new MultiThreadedQueue<>();
++
++ protected volatile boolean completed;
++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(CallbackDataLoadTask.class, "completed", boolean.class);
++
++ protected CallbackDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final RegionFileIOThread.RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, type, priority);
++ }
++
++ public void addCallback(final Consumer> consumer) {
++ if (!this.waiters.add(consumer)) {
++ try {
++ consumer.accept(this.result);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.result.right())
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ }
++ }
++ }
++
++ @Override
++ protected void onComplete(final TaskResult result) {
++ if ((boolean)COMPLETED_HANDLE.getAndSet((CallbackDataLoadTask)this, (boolean)true)) {
++ throw new IllegalStateException("Already completed");
++ }
++ this.result = result;
++ Consumer> consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ try {
++ consumer.accept(result);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(result.right())
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++ }
++ }
++ }
++
++ public final class ChunkDataLoadTask extends CallbackDataLoadTask {
++ protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return true;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority);
++ }
++
++ @Override
++ protected TaskResult completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
++ if (data != null) {
++ return null;
++ }
++
++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
++ if (poiChunk == null) {
++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
++ } else if (!poiChunk.isLoaded()) {
++ // need to call poiChunk.load() on main
++ return null;
++ }
++
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++
++ @Override
++ protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable);
++ return new TaskResult<>(null, null);
++ }
++
++ if (data == null) {
++ return new TaskResult<>(null, null);
++ }
++
++ // need to convert data, and then deserialize it
++
++ try {
++ final ChunkPos chunkPos = new ChunkPos(this.chunkX, this.chunkZ);
++ final ChunkMap chunkMap = this.world.getChunkSource().chunkMap;
++ // run converters
++ // note: upgradeChunkTag copies the data already
++ final CompoundTag converted = chunkMap.upgradeChunkTag(
++ this.world.getTypeKey(), chunkMap.overworldDataStorage, data, chunkMap.generator.getTypeNameForDataFixer(),
++ chunkPos, this.world
++ );
++ // deserialize
++ final ChunkSerializer.InProgressChunkHolder chunkHolder = ChunkSerializer.loadChunk(
++ this.world, chunkMap.getPoiManager(), chunkPos, converted, true
++ );
++
++ return new TaskResult<>(chunkHolder, null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2);
++ return new TaskResult<>(null, thr2);
++ }
++ }
++
++ private ProtoChunk getEmptyChunk() {
++ return new ProtoChunk(
++ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
++ this.world.registryAccess().registryOrThrow(Registry.BIOME_REGISTRY), (BlendingData)null
++ );
++ }
++
++ @Override
++ protected TaskResult runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
++ if (poiChunk == null) {
++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
++ } else {
++ poiChunk.load();
++ }
++
++ if (data == null || data.protoChunk == null) {
++ // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care,
++ // it's handled already
++
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++
++ // have tasks to run (at this point, it's just the POI consistency checking)
++ try {
++ if (data.tasks != null) {
++ for (int i = 0, len = data.tasks.size(); i < len; ++i) {
++ data.tasks.poll().run();
++ }
++ }
++
++ return new TaskResult<>(data.protoChunk, null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2);
++ return new TaskResult<>(this.getEmptyChunk(), null);
++ }
++ }
++ }
++
++ public static final class PoiDataLoadTask extends CallbackDataLoadTask {
++ public PoiDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.POI_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return false;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult completeOnMainOffMain(final PoiChunk data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult runOffMain(CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load poi data for task: " + this.toString() + ", poi data will be lost", throwable);
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++
++ if (data == null || data.isEmpty()) {
++ // nothing to do
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++
++ try {
++ data = data.copy(); // coming from the I/O thread, so we need to copy
++ // run converters
++ final int dataVersion = !data.contains(SharedConstants.DATA_VERSION_TAG, 99) ? 1945 : data.getInt(SharedConstants.DATA_VERSION_TAG);
++ final CompoundTag converted = MCDataConverter.convertTag(
++ MCTypeRegistry.POI_CHUNK, data, dataVersion, SharedConstants.getCurrentVersion().getWorldVersion()
++ );
++
++ // now we need to parse it
++ return new TaskResult<>(PoiChunk.parse(this.world, this.chunkX, this.chunkZ, converted), null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to run parse poi data for task: " + this.toString() + ", poi data will be lost", thr2);
++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
++ }
++ }
++
++ @Override
++ protected TaskResult runOnMain(final PoiChunk data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++ }
++
++ public static final class EntityDataLoadTask extends CallbackDataLoadTask {
++
++ public EntityDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, priority);
++ }
++
++ @Override
++ protected boolean hasOffMain() {
++ return true;
++ }
++
++ @Override
++ protected boolean hasOnMain() {
++ return false;
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.scheduler.loadExecutor.createTask(run, priority);
++ }
++
++ @Override
++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult completeOnMainOffMain(final CompoundTag data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) {
++ if (throwable != null) {
++ LOGGER.error("Failed to load entity data for task: " + this.toString() + ", entity data will be lost", throwable);
++ return new TaskResult<>(null, null);
++ }
++
++ if (data == null || data.isEmpty()) {
++ // nothing to do
++ return new TaskResult<>(null, null);
++ }
++
++ try {
++ // note: data comes from the I/O thread, so we need to copy it
++ return new TaskResult<>(EntityStorage.upgradeChunkTag(data.copy()), null);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed to run converters for entity data for task: " + this.toString() + ", entity data will be lost", thr2);
++ return new TaskResult<>(null, thr2);
++ }
++ }
++
++ @Override
++ protected TaskResult runOnMain(final CompoundTag data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..322675a470eacbf0e5452f4009c643f2d0b4ce24
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
+@@ -0,0 +1,105 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.function.BiConsumer;
++
++public abstract class ChunkProgressionTask {
++
++ private final MultiThreadedQueue> waiters = new MultiThreadedQueue<>();
++ private ChunkAccess completedChunk;
++ private Throwable completedThrowable;
++
++ protected final ChunkTaskScheduler scheduler;
++ protected final ServerLevel world;
++ protected final int chunkX;
++ protected final int chunkZ;
++
++ protected volatile boolean completed;
++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(ChunkProgressionTask.class, "completed", boolean.class);
++
++ protected ChunkProgressionTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ) {
++ this.scheduler = scheduler;
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ }
++
++ // Used only for debug json
++ public abstract boolean isScheduled();
++
++ // Note: It is the responsibility of the task to set the chunk's status once it has completed
++ public abstract ChunkStatus getTargetStatus();
++
++ /* Only executed once */
++ /* Implementations must be prepared to handle cases where cancel() is called before schedule() */
++ public abstract void schedule();
++
++ /* May be called multiple times */
++ public abstract void cancel();
++
++ public abstract PrioritisedExecutor.Priority getPriority();
++
++ /* Schedule lock is always held for the priority update calls */
++
++ public abstract void lowerPriority(final PrioritisedExecutor.Priority priority);
++
++ public abstract void setPriority(final PrioritisedExecutor.Priority priority);
++
++ public abstract void raisePriority(final PrioritisedExecutor.Priority priority);
++
++ public final void onComplete(final BiConsumer onComplete) {
++ if (!this.waiters.add(onComplete)) {
++ try {
++ onComplete.accept(this.completedChunk, this.completedThrowable);
++ } catch (final Throwable throwable) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Consumer", ChunkTaskScheduler.stringIfNull(onComplete),
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.completedThrowable)
++ ), throwable);
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ }
++ }
++ }
++
++ protected final void complete(final ChunkAccess chunk, final Throwable throwable) {
++ try {
++ this.complete0(chunk, throwable);
++ } catch (final Throwable thr2) {
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable)
++ ), thr2);
++ if (thr2 instanceof ThreadDeath) {
++ throw (ThreadDeath)thr2;
++ }
++ }
++ }
++
++ private void complete0(final ChunkAccess chunk, final Throwable throwable) {
++ if ((boolean)COMPLETED_HANDLE.getAndSet((ChunkProgressionTask)this, (boolean)true)) {
++ throw new IllegalStateException("Already completed");
++ }
++ this.completedChunk = chunk;
++ this.completedThrowable = throwable;
++
++ BiConsumer consumer;
++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
++ consumer.accept(chunk, throwable);
++ }
++ }
++
++ @Override
++ public String toString() {
++ return "ChunkProgressionTask{class: " + this.getClass().getName() + ", for world: " + this.world.getWorld().getName() +
++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
++ ", status: " + this.getTargetStatus().toString() + ", scheduled: " + this.isScheduled() + "}";
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..2b4e3f31d7c31aa5a4a5a18ba9e1d8b3f232fd16
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+@@ -0,0 +1,780 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import net.minecraft.CrashReport;
++import net.minecraft.CrashReportCategory;
++import net.minecraft.ReportedException;
++import net.minecraft.server.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.LevelChunk;
++import org.bukkit.Bukkit;
++import org.slf4j.Logger;
++import java.io.File;
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.List;
++import java.util.Map;
++import java.util.Objects;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.ReentrantLock;
++import java.util.function.BooleanSupplier;
++import java.util.function.Consumer;
++
++public final class ChunkTaskScheduler {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ static int newChunkSystemIOThreads;
++ static int newChunkSystemWorkerThreads;
++ static int newChunkSystemGenParallelism;
++ static int newChunkSystemLoadParallelism;
++
++ public static ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool workerThreads;
++
++ private static boolean initialised = false;
++
++ public static void init(final GlobalConfiguration.ChunkSystem config) {
++ if (initialised) {
++ return;
++ }
++ initialised = true;
++ newChunkSystemIOThreads = config.ioThreads;
++ newChunkSystemWorkerThreads = config.workerThreads;
++ if (newChunkSystemIOThreads < 0) {
++ newChunkSystemIOThreads = 1;
++ } else {
++ newChunkSystemIOThreads = Math.max(1, newChunkSystemIOThreads);
++ }
++ int defaultWorkerThreads = Runtime.getRuntime().availableProcessors() / 2;
++ if (defaultWorkerThreads <= 4) {
++ defaultWorkerThreads = defaultWorkerThreads <= 3 ? 1 : 2;
++ } else {
++ defaultWorkerThreads = defaultWorkerThreads / 2;
++ }
++ defaultWorkerThreads = Integer.getInteger("Paper.WorkerThreadCount", Integer.valueOf(defaultWorkerThreads));
++
++ if (newChunkSystemWorkerThreads < 0) {
++ newChunkSystemWorkerThreads = defaultWorkerThreads;
++ } else {
++ newChunkSystemWorkerThreads = Math.max(1, newChunkSystemWorkerThreads);
++ }
++
++ String newChunkSystemGenParallelism = config.genParallelism;
++ if (newChunkSystemGenParallelism.equalsIgnoreCase("default")) {
++ newChunkSystemGenParallelism = "true";
++ }
++ boolean useParallelGen;
++ if (newChunkSystemGenParallelism.equalsIgnoreCase("on") || newChunkSystemGenParallelism.equalsIgnoreCase("enabled")
++ || newChunkSystemGenParallelism.equalsIgnoreCase("true")) {
++ useParallelGen = true;
++ } else if (newChunkSystemGenParallelism.equalsIgnoreCase("off") || newChunkSystemGenParallelism.equalsIgnoreCase("disabled")
++ || newChunkSystemGenParallelism.equalsIgnoreCase("false")) {
++ useParallelGen = false;
++ } else {
++ throw new IllegalStateException("Invalid option for gen-parallelism: must be one of [on, off, enabled, disabled, true, false, default]");
++ }
++
++ ChunkTaskScheduler.newChunkSystemGenParallelism = useParallelGen ? newChunkSystemWorkerThreads : 1;
++ ChunkTaskScheduler.newChunkSystemLoadParallelism = newChunkSystemWorkerThreads;
++
++ io.papermc.paper.chunk.system.io.RegionFileIOThread.init(newChunkSystemIOThreads);
++ workerThreads = new ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool(
++ "Paper Chunk System Worker Pool", newChunkSystemWorkerThreads,
++ (final Thread thread, final Integer id) -> {
++ thread.setPriority(Thread.NORM_PRIORITY - 2);
++ thread.setName("Tuinity Chunk System Worker #" + id.intValue());
++ thread.setUncaughtExceptionHandler(io.papermc.paper.chunk.system.scheduling.NewChunkHolder.CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER);
++ }, (long)(20.0e6)); // 20ms
++
++ LOGGER.info("Chunk system is using " + newChunkSystemIOThreads + " I/O threads, " + newChunkSystemWorkerThreads + " worker threads, and gen parallelism of " + ChunkTaskScheduler.newChunkSystemGenParallelism + " threads");
++ }
++
++ public final ServerLevel world;
++ public final PrioritisedThreadPool workers;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
++ public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
++
++ private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue();
++
++ final ReentrantLock schedulingLock = new ReentrantLock();
++ public final ChunkHolderManager chunkHolderManager;
++
++ static {
++ ChunkStatus.EMPTY.writeRadius = 0;
++ ChunkStatus.STRUCTURE_STARTS.writeRadius = 0;
++ ChunkStatus.STRUCTURE_REFERENCES.writeRadius = 0;
++ ChunkStatus.BIOMES.writeRadius = 0;
++ ChunkStatus.NOISE.writeRadius = 0;
++ ChunkStatus.SURFACE.writeRadius = 0;
++ ChunkStatus.CARVERS.writeRadius = 0;
++ ChunkStatus.LIQUID_CARVERS.writeRadius = 0;
++ ChunkStatus.FEATURES.writeRadius = 1;
++ ChunkStatus.LIGHT.writeRadius = 1;
++ ChunkStatus.SPAWN.writeRadius = 0;
++ ChunkStatus.HEIGHTMAPS.writeRadius = 0;
++ ChunkStatus.FULL.writeRadius = 0;
++
++ /*
++ It's important that the neighbour read radius is taken into account. If _any_ later status is using some chunk as
++ a neighbour, it must be also safe if that neighbour is being generated. i.e for any status later than FEATURES,
++ for a status to be parallel safe it must not read the block data from its neighbours.
++ */
++ final List<ChunkStatus> parallelCapableStatus = Arrays.asList(
++ // No-op executor.
++ ChunkStatus.EMPTY,
++
++ // This is parallel capable, as CB has fixed the concurrency issue with stronghold generations.
++ // Does not touch neighbour chunks.
++ // TODO On another note, what the fuck is StructureFeatureManager.StructureCheck and why is it used? it's leaking
++ ChunkStatus.STRUCTURE_STARTS,
++
++ // Surprisingly this is parallel capable. It is simply reading the already-created structure starts
++ // into the structure references for the chunk. So while it reads from it neighbours, its neighbours
++ // will not change, even if executed in parallel.
++ ChunkStatus.STRUCTURE_REFERENCES,
++
++ // Safe. Mojang runs it in parallel as well.
++ ChunkStatus.BIOMES,
++
++ // Safe. Mojang runs it in parallel as well.
++ ChunkStatus.NOISE,
++
++ // Parallel safe. Only touches the target chunk. Biome retrieval is now noise based, which is
++ // completely thread-safe.
++ ChunkStatus.SURFACE,
++
++ // No global state is modified in the carvers. It only touches the specified chunk. So it is parallel safe.
++ ChunkStatus.CARVERS,
++
++ // No-op executor. Was replaced in 1.18 with carvers, I think.
++ ChunkStatus.LIQUID_CARVERS,
++
++ // FEATURES is not parallel safe. It writes to neighbours.
++
++ // LIGHT is not parallel safe. It also doesn't run on the generation executor, so no point.
++
++ // Only writes to the specified chunk. State is not read by later statuses. Parallel safe.
++ // Note: it may look unsafe because it writes to a worldgenregion, but the region size is always 0 -
++ // see the task margin.
++ // However, if the neighbouring FEATURES chunk is unloaded, but then fails to load in again (for whatever
++ // reason), then it would write to this chunk - and since this status reads blocks from itself, it's not
++ // safe to execute this in parallel.
++ // SPAWN
++
++ // No-op executor.
++ ChunkStatus.HEIGHTMAPS
++
++ // FULL is executed on main.
++ );
++
++ for (final ChunkStatus status : parallelCapableStatus) {
++ status.isParallelCapable = true;
++ }
++ }
++
++ public ChunkTaskScheduler(final ServerLevel world, final PrioritisedThreadPool workers) {
++ this.world = world;
++ this.workers = workers;
++
++ final String worldName = world.getWorld().getName();
++ this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1);
++ // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks
++ this.lightExecutor = this.genExecutor;
++ this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor
++ : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism);
++ this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism);
++ this.chunkHolderManager = new ChunkHolderManager(world, this);
++ }
++
++ private final AtomicBoolean failedChunkSystem = new AtomicBoolean();
++
++ public static Object stringIfNull(final Object obj) {
++ return obj == null ? "null" : obj;
++ }
++
++ public void unrecoverableChunkSystemFailure(final int chunkX, final int chunkZ, final Map<String, Object> objectsOfInterest, final Throwable thr) {
++ final NewChunkHolder holder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
++ LOGGER.error("Chunk system error at chunk (" + chunkX + "," + chunkZ + "), holder: " + holder + ", exception:", new Throwable(thr));
++
++ if (this.failedChunkSystem.getAndSet(true)) {
++ return;
++ }
++
++ final ReportedException reportedException = thr instanceof ReportedException ? (ReportedException)thr : new ReportedException(new CrashReport("Chunk system error", thr));
++
++ CrashReportCategory crashReportCategory = reportedException.getReport().addCategory("Chunk system details");
++ crashReportCategory.setDetail("Chunk coordinate", new ChunkPos(chunkX, chunkZ).toString());
++ crashReportCategory.setDetail("ChunkHolder", Objects.toString(holder));
++ crashReportCategory.setDetail("unrecoverableChunkSystemFailure caller thread", Thread.currentThread().getName());
++
++ crashReportCategory = reportedException.getReport().addCategory("Chunk System Objects of Interest");
++ for (final Map.Entry<String, Object> entry : objectsOfInterest.entrySet()) {
++ if (entry.getValue() instanceof Throwable thrObject) {
++ crashReportCategory.setDetailError(Objects.toString(entry.getKey()), thrObject);
++ } else {
++ crashReportCategory.setDetail(Objects.toString(entry.getKey()), Objects.toString(entry.getValue()));
++ }
++ }
++
++ final Runnable crash = () -> {
++ throw new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
++ };
++
++ // this may not be good enough, specifically thanks to stupid ass plugins swallowing exceptions
++ this.scheduleChunkTask(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING);
++ // so, make the main thread pick it up
++ MinecraftServer.chunkSystemCrash = new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
++ }
++
++ public boolean executeMainThreadTask() {
++ TickThread.ensureTickThread("Cannot execute main thread task off-main");
++ return this.mainThreadExecutor.executeTask();
++ }
++
++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.raisePriority(x, z, priority);
++ }
++
++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.setPriority(x, z, priority);
++ }
++
++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
++ this.chunkHolderManager.lowerPriority(x, z, priority);
++ }
++
++ private final AtomicLong chunkLoadCounter = new AtomicLong();
++
++ public void scheduleTickingState(final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus toStatus,
++ final boolean addTicket, final PrioritisedExecutor.Priority priority,
++ final Consumer<LevelChunk> onComplete) {
++ if (!TickThread.isTickThread()) {
++ this.scheduleChunkTask(chunkX, chunkZ, () -> {
++ ChunkTaskScheduler.this.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ }, priority);
++ return;
++ }
++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
++ }
++ if (this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk loading recursively");
++ }
++
++ if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
++ throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status");
++ }
++
++ final int minLevel = 33 - (toStatus.ordinal() - 1);
++ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++
++ if (addTicket) {
++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
++ this.chunkHolderManager.processTicketUpdates();
++ }
++
++ final Consumer<LevelChunk> loadCallback = (final LevelChunk chunk) -> {
++ try {
++ if (onComplete != null) {
++ onComplete.accept(chunk);
++ }
++ } finally {
++ if (addTicket) {
++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
++ TicketType.CHUNK_LOAD, minLevel, chunkReference
++ );
++ }
++ }
++ };
++
++ final boolean scheduled;
++ final LevelChunk chunk;
++ this.chunkHolderManager.ticketLock.lock();
++ try {
++ this.schedulingLock.lock();
++ try {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
++ scheduled = false;
++ chunk = null;
++ } else {
++ final ChunkHolder.FullChunkStatus currStatus = chunkHolder.getChunkStatus();
++ if (currStatus.isOrAfter(toStatus)) {
++ scheduled = false;
++ chunk = (LevelChunk)chunkHolder.getCurrentChunk();
++ } else {
++ scheduled = true;
++ chunk = null;
++
++ final int radius = toStatus.ordinal() - 1; // 0 -> BORDER, 1 -> TICKING, 2 -> ENTITY_TICKING
++ for (int dz = -radius; dz <= radius; ++dz) {
++ for (int dx = -radius; dx <= radius; ++dx) {
++ final NewChunkHolder neighbour =
++ (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
++ if (neighbour != null) {
++ neighbour.raisePriority(priority);
++ }
++ }
++ }
++
++ // ticket level should schedule for us
++ chunkHolder.addFullStatusConsumer(toStatus, loadCallback);
++ }
++ }
++ } finally {
++ this.schedulingLock.unlock();
++ }
++ } finally {
++ this.chunkHolderManager.ticketLock.unlock();
++ }
++
++ if (!scheduled) {
++ // couldn't schedule
++ try {
++ loadCallback.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk full status callback", thr);
++ }
++ }
++ }
++
++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final boolean gen, final ChunkStatus toStatus, final boolean addTicket,
++ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
++ if (gen) {
++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ return;
++ }
++ this.scheduleChunkLoad(chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> {
++ if (chunk == null) {
++ onComplete.accept(null);
++ } else {
++ if (chunk.getStatus().isOrAfter(toStatus)) {
++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ } else {
++ onComplete.accept(null);
++ }
++ }
++ });
++ }
++
++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final ChunkStatus toStatus, final boolean addTicket,
++ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
++ if (!TickThread.isTickThread()) {
++ this.scheduleChunkTask(chunkX, chunkZ, () -> {
++ ChunkTaskScheduler.this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
++ }, priority);
++ return;
++ }
++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
++ }
++ if (this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Cannot schedule chunk loading recursively");
++ }
++
++ if (toStatus == ChunkStatus.FULL) {
++ this.scheduleTickingState(chunkX, chunkZ, ChunkHolder.FullChunkStatus.BORDER, addTicket, priority, (Consumer)onComplete);
++ return;
++ }
++
++ final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
++ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++
++ if (addTicket) {
++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
++ this.chunkHolderManager.processTicketUpdates();
++ }
++
++ final Consumer<ChunkAccess> loadCallback = (final ChunkAccess chunk) -> {
++ try {
++ if (onComplete != null) {
++ onComplete.accept(chunk);
++ }
++ } finally {
++ if (addTicket) {
++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
++ TicketType.CHUNK_LOAD, minLevel, chunkReference
++ );
++ }
++ }
++ };
++
++ final List<ChunkProgressionTask> tasks = new ArrayList<>();
++
++ final boolean scheduled;
++ final ChunkAccess chunk;
++ this.chunkHolderManager.ticketLock.lock();
++ try {
++ this.schedulingLock.lock();
++ try {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
++ scheduled = false;
++ chunk = null;
++ } else {
++ final ChunkStatus genStatus = chunkHolder.getCurrentGenStatus();
++ if (genStatus != null && genStatus.isOrAfter(toStatus)) {
++ scheduled = false;
++ chunk = chunkHolder.getCurrentChunk();
++ } else {
++ scheduled = true;
++ chunk = null;
++ chunkHolder.raisePriority(priority);
++
++ if (!chunkHolder.upgradeGenTarget(toStatus)) {
++ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks);
++ }
++ chunkHolder.addStatusConsumer(toStatus, loadCallback);
++ }
++ }
++ } finally {
++ this.schedulingLock.unlock();
++ }
++ } finally {
++ this.chunkHolderManager.ticketLock.unlock();
++ }
++
++ for (int i = 0, len = tasks.size(); i < len; ++i) {
++ tasks.get(i).schedule();
++ }
++
++ if (!scheduled) {
++ // couldn't schedule
++ try {
++ loadCallback.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk status callback", thr);
++ }
++ }
++ }
++
++ private ChunkProgressionTask createTask(final int chunkX, final int chunkZ, final ChunkAccess chunk,
++ final NewChunkHolder chunkHolder, final List<ChunkAccess> neighbours,
++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority initialPriority) {
++ if (toStatus == ChunkStatus.EMPTY) {
++ return new ChunkLoadTask(this, this.world, chunkX, chunkZ, chunkHolder, initialPriority);
++ }
++ if (toStatus == ChunkStatus.LIGHT) {
++ return new ChunkLightTask(this, this.world, chunkX, chunkZ, chunk, initialPriority);
++ }
++ if (toStatus == ChunkStatus.FULL) {
++ return new ChunkFullTask(this, this.world, chunkX, chunkZ, chunkHolder, chunk, initialPriority);
++ }
++
++ return new ChunkUpgradeGenericStatusTask(this, this.world, chunkX, chunkZ, chunk, neighbours, toStatus, initialPriority);
++ }
++
++ ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus, final NewChunkHolder chunkHolder,
++ final List<ChunkProgressionTask> allTasks) {
++ return this.schedule(chunkX, chunkZ, targetStatus, chunkHolder, allTasks, chunkHolder.getEffectivePriority());
++ }
++
++ // rets new task scheduled for the _specified_ chunk
++ // note: this must hold the scheduling lock
++ // minPriority is only used to pass the priority through to neighbours, as priority calculation has not yet been done
++ // schedule will ignore the generation target, so it should be checked by the caller to ensure the target is not regressed!
++ private ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus,
++ final NewChunkHolder chunkHolder, final List<ChunkProgressionTask> allTasks,
++ final PrioritisedExecutor.Priority minPriority) {
++ if (!this.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Not holding scheduling lock");
++ }
++
++ if (chunkHolder.hasGenerationTask()) {
++ chunkHolder.upgradeGenTarget(targetStatus);
++ return null;
++ }
++
++ final PrioritisedExecutor.Priority requestedPriority = PrioritisedExecutor.Priority.max(minPriority, chunkHolder.getEffectivePriority());
++ final ChunkStatus currentGenStatus = chunkHolder.getCurrentGenStatus();
++ final ChunkAccess chunk = chunkHolder.getCurrentChunk();
++
++ if (currentGenStatus == null) {
++ // not yet loaded
++ final ChunkProgressionTask task = this.createTask(
++ chunkX, chunkZ, chunk, chunkHolder, Collections.emptyList(), ChunkStatus.EMPTY, requestedPriority
++ );
++
++ allTasks.add(task);
++
++ final List<NewChunkHolder> chunkHolderNeighbours = new ArrayList<>(1);
++ chunkHolderNeighbours.add(chunkHolder);
++
++ chunkHolder.setGenerationTarget(targetStatus);
++ chunkHolder.setGenerationTask(task, ChunkStatus.EMPTY, chunkHolderNeighbours);
++
++ return task;
++ }
++
++ if (currentGenStatus.isOrAfter(targetStatus)) {
++ // nothing to do
++ return null;
++ }
++
++ // we know for sure now that we want to schedule _something_, so set the target
++ chunkHolder.setGenerationTarget(targetStatus);
++
++ final ChunkStatus chunkRealStatus = chunk.getStatus();
++ final ChunkStatus toStatus = currentGenStatus.getNextStatus();
++
++ // if this chunk has already generated up to or past the specified status, then we don't
++ // need the neighbours AT ALL.
++ final int neighbourReadRadius = chunkRealStatus.isOrAfter(toStatus) ? toStatus.loadRange : toStatus.getRange();
++
++ boolean unGeneratedNeighbours = false;
++
++ // copied from MCUtil.getSpiralOutChunks
++ for (int r = 1; r <= neighbourReadRadius; r++) {
++ int x = -r;
++ int z = r;
++
++ // Iterates the edge of half of the box; then negates for other half.
++ while (x <= r && z > -r) {
++ final int radius = Math.max(Math.abs(x), Math.abs(z));
++ final ChunkStatus requiredNeighbourStatus = ChunkMap.getDependencyStatus(toStatus, radius);
++
++ unGeneratedNeighbours |= this.checkNeighbour(
++ chunkX + x, chunkZ + z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
++ );
++ unGeneratedNeighbours |= this.checkNeighbour(
++ chunkX - x, chunkZ - z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
++ );
++
++ if (x < r) {
++ x++;
++ } else {
++ z--;
++ }
++ }
++ }
++
++ if (unGeneratedNeighbours) {
++ // can't schedule, but neighbour completion will schedule for us when they're ALL done
++
++ // propagate our priority to neighbours
++ chunkHolder.recalculateNeighbourPriorities();
++ return null;
++ }
++
++ // need to gather neighbours
++
++ final List<ChunkAccess> neighbours;
++ final List<NewChunkHolder> chunkHolderNeighbours;
++ if (neighbourReadRadius <= 0) {
++ neighbours = new ArrayList<>(1);
++ chunkHolderNeighbours = new ArrayList<>(1);
++ neighbours.add(chunk);
++ chunkHolderNeighbours.add(chunkHolder);
++ } else {
++ // the iteration order is _very_ important, as all generation statuses expect a certain order such that:
++ // chunkAtRelative = neighbours.get(relX + relZ * (2 * radius + 1))
++ neighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
++ chunkHolderNeighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
++ for (int dz = -neighbourReadRadius; dz <= neighbourReadRadius; ++dz) {
++ for (int dx = -neighbourReadRadius; dx <= neighbourReadRadius; ++dx) {
++ final NewChunkHolder holder = (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
++ neighbours.add(holder.getChunkForNeighbourAccess());
++ chunkHolderNeighbours.add(holder);
++ }
++ }
++ }
++
++ final ChunkProgressionTask task = this.createTask(chunkX, chunkZ, chunk, chunkHolder, neighbours, toStatus, chunkHolder.getEffectivePriority());
++ allTasks.add(task);
++
++ chunkHolder.setGenerationTask(task, toStatus, chunkHolderNeighbours);
++
++ return task;
++ }
++
++ // rets true if the neighbour is not at the required status, false otherwise
++ private boolean checkNeighbour(final int chunkX, final int chunkZ, final ChunkStatus requiredStatus, final NewChunkHolder center,
++ final List<ChunkProgressionTask> tasks, final PrioritisedExecutor.Priority minPriority) {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
++
++ if (chunkHolder == null) {
++ throw new IllegalStateException("Missing chunkholder when required");
++ }
++
++ final ChunkStatus holderStatus = chunkHolder.getCurrentGenStatus();
++ if (holderStatus != null && holderStatus.isOrAfter(requiredStatus)) {
++ return false;
++ }
++
++ if (chunkHolder.hasFailedGeneration()) {
++ return true;
++ }
++
++ center.addGenerationBlockingNeighbour(chunkHolder);
++ chunkHolder.addWaitingNeighbour(center, requiredStatus);
++
++ if (chunkHolder.upgradeGenTarget(requiredStatus)) {
++ return true;
++ }
++
++ // not at status required, so we need to schedule its generation
++ this.schedule(
++ chunkX, chunkZ, requiredStatus, chunkHolder, tasks, minPriority
++ );
++
++ return true;
++ }
++
++ /**
++ * @deprecated Chunk tasks must be tied to coordinates in the future
++ */
++ @Deprecated
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run) {
++ return this.scheduleChunkTask(run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ /**
++ * @deprecated Chunk tasks must be tied to coordinates in the future
++ */
++ @Deprecated
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.queueRunnable(run, priority);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
++ return this.createChunkTask(chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run,
++ final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.createTask(run, priority);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
++ return this.mainThreadExecutor.queueRunnable(run);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run,
++ final PrioritisedExecutor.Priority priority) {
++ return this.mainThreadExecutor.queueRunnable(run, priority);
++ }
++
++ public void executeTasksUntil(final BooleanSupplier exit) {
++ if (Bukkit.isPrimaryThread()) {
++ this.mainThreadExecutor.executeConditionally(exit);
++ } else {
++ long counter = 1L;
++ while (!exit.getAsBoolean()) {
++ counter = ConcurrentUtil.linearLongBackoff(counter, 100_000L, 5_000_000L); // 100us, 5ms
++ }
++ }
++ }
++
++ public boolean halt(final boolean sync, final long maxWaitNS) {
++ this.lightExecutor.halt();
++ this.genExecutor.halt();
++ this.parallelGenExecutor.halt();
++ this.loadExecutor.halt();
++ final long time = System.nanoTime();
++ if (sync) {
++ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
++ if (
++ !this.lightExecutor.isActive() &&
++ !this.genExecutor.isActive() &&
++ !this.parallelGenExecutor.isActive() &&
++ !this.loadExecutor.isActive()
++ ) {
++ return true;
++ }
++ if ((System.nanoTime() - time) >= maxWaitNS) {
++ return false;
++ }
++ }
++ }
++
++ return true;
++ }
++
++ public static final ArrayDeque<ChunkInfo> WAITING_CHUNKS = new ArrayDeque<>(); // stack
++
++ public static final class ChunkInfo {
++
++ public final int chunkX;
++ public final int chunkZ;
++ public final ServerLevel world;
++
++ public ChunkInfo(final int chunkX, final int chunkZ, final ServerLevel world) {
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.world = world;
++ }
++
++ @Override
++ public String toString() {
++ return "[( " + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "']";
++ }
++ }
++
++ public static void pushChunkWait(final ServerLevel world, final int chunkX, final int chunkZ) {
++ synchronized (WAITING_CHUNKS) {
++ WAITING_CHUNKS.push(new ChunkInfo(chunkX, chunkZ, world));
++ }
++ }
++
++ public static void popChunkWait() {
++ synchronized (WAITING_CHUNKS) {
++ WAITING_CHUNKS.pop();
++ }
++ }
++
++ public static ChunkInfo[] getChunkInfos() {
++ synchronized (WAITING_CHUNKS) {
++ return WAITING_CHUNKS.toArray(new ChunkInfo[0]);
++ }
++ }
++
++ public static void dumpAllChunkLoadInfo(final boolean longPrint) {
++ final ChunkInfo[] chunkInfos = getChunkInfos();
++ if (chunkInfos.length > 0) {
++ LOGGER.error("Chunk wait task info below: ");
++ for (final ChunkInfo chunkInfo : chunkInfos) {
++ final NewChunkHolder holder = chunkInfo.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkInfo.chunkX, chunkInfo.chunkZ);
++ LOGGER.error("Chunk wait: " + chunkInfo);
++ LOGGER.error("Chunk holder: " + holder);
++ }
++
++ if (longPrint) {
++ final File file = new File(new File(new File("."), "debug"), "chunks-watchdog.txt");
++ LOGGER.error("Writing chunk information dump to " + file);
++ try {
++ MCUtil.dumpChunks(file, true);
++ LOGGER.error("Successfully written chunk information!");
++ } catch (final Throwable thr) {
++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
++ }
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..e96ecf351a1952b4e23e9a352f32d326146380e7
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+@@ -0,0 +1,209 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.datafixers.util.Either;
++import com.mojang.logging.LogUtils;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerChunkCache;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.List;
++import java.util.Map;
++import java.util.concurrent.CompletableFuture;
++
++public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask implements Runnable {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected final ChunkAccess fromChunk;
++ protected final ChunkStatus fromStatus;
++ protected final ChunkStatus toStatus;
++ protected final List<ChunkAccess> neighbours;
++
++ protected final PrioritisedExecutor.PrioritisedTask generateTask;
++
++ public ChunkUpgradeGenericStatusTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final ChunkAccess chunk, final List<ChunkAccess> neighbours,
++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority priority) {
++ super(scheduler, world, chunkX, chunkZ);
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.fromChunk = chunk;
++ this.fromStatus = chunk.getStatus();
++ this.toStatus = toStatus;
++ this.neighbours = neighbours;
++ this.generateTask = (this.toStatus.isParallelCapable ? this.scheduler.parallelGenExecutor : this.scheduler.genExecutor)
++ .createTask(this, priority);
++ }
++
++ @Override
++ public ChunkStatus getTargetStatus() {
++ return this.toStatus;
++ }
++
++ private boolean isEmptyTask() {
++ // must use fromStatus here to avoid any race condition with run() overwriting the status
++ final boolean generation = !this.fromStatus.isOrAfter(this.toStatus);
++ return (generation && this.toStatus.isEmptyGenStatus()) || (!generation && this.toStatus.isEmptyLoadStatus());
++ }
++
++ @Override
++ public void run() {
++ final ChunkAccess chunk = this.fromChunk;
++
++ final ServerChunkCache serverChunkCache = this.world.chunkSource;
++ final ChunkMap chunkMap = serverChunkCache.chunkMap;
++
++ final CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completeFuture;
++
++ final boolean generation;
++ boolean completing = false;
++
++ // note: should optimise the case where the chunk does not need to execute the status, because
++ // schedule() calls this synchronously if it will run through that path
++
++ try {
++ generation = !chunk.getStatus().isOrAfter(this.toStatus);
++ if (generation) {
++ if (this.toStatus.isEmptyGenStatus()) {
++ if (chunk instanceof ProtoChunk) {
++ ((ProtoChunk)chunk).setStatus(this.toStatus);
++ }
++ completing = true;
++ this.complete(chunk, null);
++ return;
++ }
++ completeFuture = this.toStatus.generate(Runnable::run, this.world, chunkMap.generator, chunkMap.structureTemplateManager,
++ serverChunkCache.getLightEngine(), null, this.neighbours, false)
++ .whenComplete((final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either, final Throwable throwable) -> {
++ final ChunkAccess newChunk = (either == null) ? null : either.left().orElse(null);
++ if (newChunk instanceof ProtoChunk) {
++ ((ProtoChunk)newChunk).setStatus(ChunkUpgradeGenericStatusTask.this.toStatus);
++ }
++ }
++ );
++ } else {
++ if (this.toStatus.isEmptyLoadStatus()) {
++ completing = true;
++ this.complete(chunk, null);
++ return;
++ }
++ completeFuture = this.toStatus.load(this.world, chunkMap.structureTemplateManager, serverChunkCache.getLightEngine(), null, chunk);
++ }
++ } catch (final Throwable throwable) {
++ if (!completing) {
++ this.complete(null, throwable);
++
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Target status", ChunkTaskScheduler.stringIfNull(this.toStatus),
++ "From status", ChunkTaskScheduler.stringIfNull(this.fromStatus),
++ "Generation task", this
++ ), throwable);
++
++ if (!(throwable instanceof ThreadDeath)) {
++ LOGGER.error("Failed to complete status for chunk: status:" + this.toStatus + ", chunk: (" + this.chunkX + "," + this.chunkZ + "), world: " + this.world.getWorld().getName(), throwable);
++ } else {
++ // ensure the chunk system can respond, then die
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ if (!completeFuture.isDone() && !this.toStatus.warnedAboutNoImmediateComplete.getAndSet(true)) {
++ LOGGER.warn("Future status not complete after scheduling: " + this.toStatus.toString() + ", generate: " + generation);
++ }
++
++ final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either;
++ final ChunkAccess newChunk;
++
++ try {
++ either = completeFuture.join();
++ newChunk = (either == null) ? null : either.left().orElse(null);
++ } catch (final Throwable throwable) {
++ this.complete(null, throwable);
++ // ensure the chunk system can respond, then die
++ if (throwable instanceof ThreadDeath) {
++ throw (ThreadDeath)throwable;
++ }
++ return;
++ }
++
++ if (newChunk == null) {
++ this.complete(null, new IllegalStateException("Chunk for status: " + ChunkUpgradeGenericStatusTask.this.toStatus.toString() + ", generation: " + generation + " should not be null! Either: " + either).fillInStackTrace());
++ return;
++ }
++
++ this.complete(newChunk, null);
++ }
++
++ protected volatile boolean scheduled;
++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkUpgradeGenericStatusTask.class, "scheduled", boolean.class);
++
++ @Override
++ public boolean isScheduled() {
++ return this.scheduled;
++ }
++
++ @Override
++ public void schedule() {
++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkUpgradeGenericStatusTask)this, true)) {
++ throw new IllegalStateException("Cannot double call schedule()");
++ }
++ if (this.isEmptyTask()) {
++ if (this.generateTask.cancel()) {
++ this.run();
++ }
++ } else {
++ this.generateTask.queue();
++ }
++ }
++
++ @Override
++ public void cancel() {
++ if (this.generateTask.cancel()) {
++ this.complete(null, null);
++ }
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.generateTask.getPriority();
++ }
++
++ @Override
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.lowerPriority(priority);
++ }
++
++ @Override
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.setPriority(priority);
++ }
++
++ @Override
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.generateTask.raisePriority(priority);
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..ffbfaef2a57f0f26d0143f3a8fcf937bee7e7398
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
+@@ -0,0 +1,746 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.completable.Completable;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ServerLevel;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.Map;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.function.BiConsumer;
++
++public abstract class GenericDataLoadTask<OnMain, FinalCompletion> {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ protected static final CompoundTag CANCELLED_DATA = new CompoundTag();
++
++ // reference count is the upper 32 bits
++ protected final AtomicLong stageAndReferenceCount = new AtomicLong(STAGE_NOT_STARTED);
++
++ protected static final long STAGE_MASK = 0xFFFFFFFFL;
++ protected static final long STAGE_CANCELLED = 0xFFFFFFFFL;
++ protected static final long STAGE_NOT_STARTED = 0L;
++ protected static final long STAGE_LOADING = 1L;
++ protected static final long STAGE_PROCESSING = 2L;
++ protected static final long STAGE_COMPLETED = 3L;
++
++ // for loading data off disk
++ protected final LoadDataFromDiskTask loadDataFromDiskTask;
++ // processing off-main
++ protected final PrioritisedExecutor.PrioritisedTask processOffMain;
++ // processing on-main
++ protected final PrioritisedExecutor.PrioritisedTask processOnMain;
++
++ protected final ChunkTaskScheduler scheduler;
++ protected final ServerLevel world;
++ protected final int chunkX;
++ protected final int chunkZ;
++ protected final RegionFileIOThread.RegionFileType type;
++
++ public GenericDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
++ final int chunkZ, final RegionFileIOThread.RegionFileType type,
++ final PrioritisedExecutor.Priority priority) {
++ this.scheduler = scheduler;
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.type = type;
++
++ final ProcessOnMainTask mainTask;
++ if (this.hasOnMain()) {
++ mainTask = new ProcessOnMainTask();
++ this.processOnMain = this.createOnMain(mainTask, priority);
++ } else {
++ mainTask = null;
++ this.processOnMain = null;
++ }
++
++ final ProcessOffMainTask offMainTask;
++ if (this.hasOffMain()) {
++ offMainTask = new ProcessOffMainTask(mainTask);
++ this.processOffMain = this.createOffMain(offMainTask, priority);
++ } else {
++ offMainTask = null;
++ this.processOffMain = null;
++ }
++
++ if (this.processOffMain == null && this.processOnMain == null) {
++ throw new IllegalStateException("Illegal class implementation: " + this.getClass().getName() + ", should be able to schedule at least one task!");
++ }
++
++ this.loadDataFromDiskTask = new LoadDataFromDiskTask(world, chunkX, chunkZ, type, new DataLoadCallback(offMainTask, mainTask), priority);
++ }
++
++ public static final record TaskResult<L, R>(L left, R right) {}
++
++ protected abstract boolean hasOffMain();
++
++ protected abstract boolean hasOnMain();
++
++ protected abstract PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority);
++
++ protected abstract PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority);
++
++ protected abstract TaskResult<OnMain, Throwable> runOffMain(final CompoundTag data, final Throwable throwable);
++
++ protected abstract TaskResult<FinalCompletion, Throwable> runOnMain(final OnMain data, final Throwable throwable);
++
++ protected abstract void onComplete(final TaskResult<FinalCompletion, Throwable> result);
++
++ protected abstract TaskResult<FinalCompletion, Throwable> completeOnMainOffMain(final OnMain data, final Throwable throwable);
++
++ @Override
++ public String toString() {
++ return "GenericDataLoadTask{class: " + this.getClass().getName() + ", world: " + this.world.getWorld().getName() +
++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
++ ", type: " + this.type.toString() + "}";
++ }
++
++ public PrioritisedExecutor.Priority getPriority() {
++ if (this.processOnMain != null) {
++ return this.processOnMain.getPriority();
++ } else {
++ return this.processOffMain.getPriority();
++ }
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ if (this.processOffMain != null) {
++ this.processOffMain.lowerPriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.lowerPriority(priority);
++ }
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ this.loadDataFromDiskTask.raisePriority(priority);
++ if (this.processOffMain != null) {
++ this.processOffMain.setPriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.setPriority(priority);
++ }
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ // can't lower I/O tasks, we don't know what they affect
++ this.loadDataFromDiskTask.raisePriority(priority);
++ if (this.processOffMain != null) {
++ this.processOffMain.raisePriority(priority);
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.raisePriority(priority);
++ }
++ }
++
++ // returns whether scheduleNow() needs to be called
++ public boolean schedule(final boolean delay) {
++ if (this.stageAndReferenceCount.get() != STAGE_NOT_STARTED ||
++ !this.stageAndReferenceCount.compareAndSet(STAGE_NOT_STARTED, (1L << 32) | STAGE_LOADING)) {
++ // try and increment reference count
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) == STAGE_CANCELLED || (curr & STAGE_MASK) == STAGE_COMPLETED) {
++ // cancelled or completed, nothing to do here
++ return false;
++ }
++
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, curr + (1L << 32)))) {
++ // successful
++ return false;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ if (!delay) {
++ this.scheduleNow();
++ return false;
++ }
++ return true;
++ }
++
++ public void scheduleNow() {
++ this.loadDataFromDiskTask.schedule(); // will schedule the rest
++ }
++
++ // assumes the current stage cannot be completed
++ // returns false if cancelled, returns true if can proceed
++ private boolean advanceStage(final long expect, final long to) {
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) != expect) {
++ // must be cancelled
++ return false;
++ }
++
++ final long newVal = (curr & ~STAGE_MASK) | to;
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ return true;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public boolean cancel() {
++ int failures = 0;
++ for (long curr = this.stageAndReferenceCount.get();;) {
++ if ((curr & STAGE_MASK) == STAGE_COMPLETED || (curr & STAGE_MASK) == STAGE_CANCELLED) {
++ return false;
++ }
++
++ if ((curr & STAGE_MASK) == STAGE_NOT_STARTED || (curr & ~STAGE_MASK) == (1L << 32)) {
++ // no other references, so we can cancel
++ final long newVal = STAGE_CANCELLED;
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ this.loadDataFromDiskTask.cancel();
++ if (this.processOffMain != null) {
++ this.processOffMain.cancel();
++ }
++ if (this.processOnMain != null) {
++ this.processOnMain.cancel();
++ }
++ this.onComplete(null);
++ return true;
++ }
++ } else {
++ if ((curr & ~STAGE_MASK) == (0L << 32)) {
++ throw new IllegalStateException("Reference count cannot be zero here");
++ }
++ // just decrease the reference count
++ final long newVal = curr - (1L << 32);
++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
++ return false;
++ }
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ protected final class DataLoadCallback implements BiConsumer<CompoundTag, Throwable> {
++
++ protected final ProcessOffMainTask offMainTask;
++ protected final ProcessOnMainTask onMainTask;
++
++ public DataLoadCallback(final ProcessOffMainTask offMainTask, final ProcessOnMainTask onMainTask) {
++ this.offMainTask = offMainTask;
++ this.onMainTask = onMainTask;
++ }
++
++ @Override
++ public void accept(final CompoundTag compoundTag, final Throwable throwable) {
++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
++ // don't try to schedule further
++ return;
++ }
++
++ try {
++ if (compoundTag == CANCELLED_DATA) {
++ // cancelled, except this isn't possible
++ LOGGER.error("Data callback says cancelled, but stage does not?");
++ return;
++ }
++
++ // get off of the regionfile callback ASAP, no clue what locks are held right now...
++ if (GenericDataLoadTask.this.processOffMain != null) {
++ this.offMainTask.data = compoundTag;
++ this.offMainTask.throwable = throwable;
++ GenericDataLoadTask.this.processOffMain.queue();
++ return;
++ } else {
++ // no off-main task, so go straight to main
++ this.onMainTask.data = (OnMain)compoundTag;
++ this.onMainTask.throwable = throwable;
++ GenericDataLoadTask.this.processOnMain.queue();
++ }
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr2) {
++ LOGGER.error("Failed I/O callback for task: " + GenericDataLoadTask.this.toString(), thr2);
++ GenericDataLoadTask.this.scheduler.unrecoverableChunkSystemFailure(
++ GenericDataLoadTask.this.chunkX, GenericDataLoadTask.this.chunkZ, Map.of(
++ "Callback throwable", ChunkTaskScheduler.stringIfNull(throwable)
++ ), thr2);
++ }
++ }
++ }
++
++ protected final class ProcessOffMainTask implements Runnable {
++
++ protected CompoundTag data;
++ protected Throwable throwable;
++ protected final ProcessOnMainTask schedule;
++
++ public ProcessOffMainTask(final ProcessOnMainTask schedule) {
++ this.schedule = schedule;
++ }
++
++ @Override
++ public void run() {
++ if (!GenericDataLoadTask.this.advanceStage(STAGE_LOADING, this.schedule == null ? STAGE_COMPLETED : STAGE_PROCESSING)) {
++ // cancelled
++ return;
++ }
++ final TaskResult<OnMain, Throwable> newData = GenericDataLoadTask.this.runOffMain(this.data, this.throwable);
++
++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
++ // don't try to schedule further
++ return;
++ }
++
++ if (this.schedule != null) {
++ final TaskResult<FinalCompletion, Throwable> syncComplete = GenericDataLoadTask.this.completeOnMainOffMain(newData.left(), newData.right());
++
++ if (syncComplete != null) {
++ if (GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
++ GenericDataLoadTask.this.onComplete(syncComplete);
++ } // else: cancelled
++ return;
++ }
++
++ this.schedule.data = newData.left();
++ this.schedule.throwable = newData.right();
++
++ GenericDataLoadTask.this.processOnMain.queue();
++ } else {
++ GenericDataLoadTask.this.onComplete((TaskResult)newData);
++ }
++ }
++ }
++
++ protected final class ProcessOnMainTask implements Runnable {
++
++ protected OnMain data;
++ protected Throwable throwable;
++
++ @Override
++ public void run() {
++ if (!GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
++ // cancelled
++ return;
++ }
++ final TaskResult<FinalCompletion, Throwable> result = GenericDataLoadTask.this.runOnMain(this.data, this.throwable);
++
++ GenericDataLoadTask.this.onComplete(result);
++ }
++ }
++
++ public static final class LoadDataFromDiskTask {
++
++ protected volatile int priority;
++ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(LoadDataFromDiskTask.class, "priority", int.class);
++
++ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 0;
++ protected static final int PRIORITY_LOAD_SCHEDULED = Integer.MIN_VALUE >>> 1;
++ protected static final int PRIORITY_UNLOAD_SCHEDULED = Integer.MIN_VALUE >>> 2;
++
++ protected static final int PRIORITY_FLAGS = ~Character.MAX_VALUE;
++
++ protected final int getPriorityVolatile() {
++ return (int)PRIORITY_HANDLE.getVolatile((LoadDataFromDiskTask)this);
++ }
++
++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
++ return (int)PRIORITY_HANDLE.compareAndExchange((LoadDataFromDiskTask)this, (int)expect, (int)update);
++ }
++
++ protected final int getAndOrPriorityVolatile(final int val) {
++ return (int)PRIORITY_HANDLE.getAndBitwiseOr((LoadDataFromDiskTask)this, (int)val);
++ }
++
++ protected final void setPriorityPlain(final int val) {
++ PRIORITY_HANDLE.set((LoadDataFromDiskTask)this, (int)val);
++ }
++
++ private final ServerLevel world;
++ private final int chunkX;
++ private final int chunkZ;
++
++ private final RegionFileIOThread.RegionFileType type;
++ private Cancellable dataLoadTask;
++ private Cancellable dataUnloadCancellable;
++ private DelayedPrioritisedTask dataUnloadTask;
++
++ private final BiConsumer<CompoundTag, Throwable> onComplete;
++
++ // onComplete should be caller sensitive, it may complete synchronously with schedule() - which does
++ // hold a priority lock.
++ public LoadDataFromDiskTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final RegionFileIOThread.RegionFileType type,
++ final BiConsumer<CompoundTag, Throwable> onComplete,
++ final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.type = type;
++ this.onComplete = onComplete;
++ this.setPriorityPlain(priority.priority);
++ }
++
++ private void complete(final CompoundTag data, final Throwable throwable) {
++ try {
++ this.onComplete.accept(data, throwable);
++ } catch (final Throwable thr2) {
++ this.world.chunkTaskScheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable),
++ "Regionfile type", ChunkTaskScheduler.stringIfNull(this.type)
++ ), thr2);
++ if (thr2 instanceof ThreadDeath) {
++ throw (ThreadDeath)thr2;
++ }
++ }
++ }
++
++ protected boolean markExecuting() {
++ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0;
++ }
++
++ protected boolean isMarkedExecuted() {
++ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0;
++ }
++
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.lowerPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.lowerPriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (!priority.isHigherPriority(curr & ~PRIORITY_FLAGS)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.setPriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
++ RegionFileIOThread.raisePriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
++ return;
++ }
++
++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.raisePriority(priority);
++ }
++ // no return - we need to propagate priority
++ }
++
++ if (!priority.isLowerPriority(curr & ~PRIORITY_FLAGS)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public void cancel() {
++ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed already
++ return;
++ }
++
++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
++ // the write to dataLoadTask will check for the cancelled bit
++ if (this.dataUnloadCancellable != null) {
++ this.dataUnloadCancellable.cancel();
++ }
++
++ if (this.dataLoadTask != null) {
++ this.dataLoadTask.cancel();
++ }
++
++ this.complete(CANCELLED_DATA, null);
++ }
++
++ private final AtomicBoolean scheduled = new AtomicBoolean();
++
++ public void schedule() {
++ if (this.scheduled.getAndSet(true)) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++ int priority = this.getPriorityVolatile();
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled
++ return;
++ }
++
++ final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need
++ // to mark complete here to ensure we do not double complete
++ if (LoadDataFromDiskTask.this.markExecuting()) {
++ LoadDataFromDiskTask.this.complete(data, thr);
++ } // else: cancelled
++ };
++
++ final PrioritisedExecutor.Priority initialPriority = PrioritisedExecutor.Priority.getPriority(priority);
++ boolean scheduledUnload = false;
++
++ final NewChunkHolder holder = this.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ);
++ if (holder != null) {
++ final BiConsumer<CompoundTag, Throwable> unloadConsumer = (final CompoundTag data, final Throwable thr) -> {
++ if (data != null) {
++ consumer.accept(data, null);
++ } else {
++ // need to schedule task
++ LoadDataFromDiskTask.this.schedule(false, consumer, PrioritisedExecutor.Priority.getPriority(LoadDataFromDiskTask.this.getPriorityVolatile() & ~PRIORITY_FLAGS));
++ }
++ };
++ Cancellable unloadCancellable = null;
++ CompoundTag syncComplete = null;
++ final NewChunkHolder.UnloadTask unloadTask = holder.getUnloadTask(this.type); // can be null if no task exists
++ final Completable<CompoundTag> unloadCompletable = unloadTask == null ? null : unloadTask.completable();
++ if (unloadCompletable != null) {
++ unloadCancellable = unloadCompletable.addAsynchronousWaiter(unloadConsumer);
++ if (unloadCancellable == null) {
++ syncComplete = unloadCompletable.getResult();
++ }
++ }
++
++ if (syncComplete != null) {
++ consumer.accept(syncComplete, null);
++ return;
++ }
++
++ if (unloadCancellable != null) {
++ scheduledUnload = true;
++ this.dataUnloadCancellable = unloadCancellable;
++ this.dataUnloadTask = unloadTask.task();
++ }
++ }
++
++ this.schedule(scheduledUnload, consumer, initialPriority);
++ }
++
++ private void schedule(final boolean scheduledUnload, final BiConsumer consumer, final PrioritisedExecutor.Priority initialPriority) {
++ int priority = this.getPriorityVolatile();
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled
++ return;
++ }
++
++ if (!scheduledUnload) {
++ this.dataLoadTask = RegionFileIOThread.loadDataAsync(
++ this.world, this.chunkX, this.chunkZ, this.type, consumer,
++ initialPriority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), initialPriority
++ );
++ }
++
++ int failures = 0;
++ for (;;) {
++ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | (scheduledUnload ? PRIORITY_UNLOAD_SCHEDULED : PRIORITY_LOAD_SCHEDULED)))) {
++ return;
++ }
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ if (this.dataUnloadCancellable != null) {
++ this.dataUnloadCancellable.cancel();
++ }
++
++ if (this.dataLoadTask != null) {
++ this.dataLoadTask.cancel();
++ }
++ return;
++ }
++
++ if (scheduledUnload) {
++ if (this.dataUnloadTask != null) {
++ this.dataUnloadTask.setPriority(PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
++ }
++ } else {
++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ /*
++ private static final class LoadDataPriorityHolder extends PriorityHolder {
++
++ protected final LoadDataFromDiskTask task;
++
++ protected LoadDataPriorityHolder(final PrioritisedExecutor.Priority priority, final LoadDataFromDiskTask task) {
++ super(priority);
++ this.task = task;
++ }
++
++ @Override
++ protected void cancelScheduled() {
++ final Cancellable dataLoadTask = this.task.dataLoadTask;
++ if (dataLoadTask != null) {
++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
++ // the write to dataLoadTask will check for the cancelled bit
++ this.task.dataLoadTask.cancel();
++ }
++ this.task.complete(CANCELLED_DATA, null);
++ }
++
++ @Override
++ protected PrioritisedExecutor.Priority getScheduledPriority() {
++ final LoadDataFromDiskTask task = this.task;
++ return RegionFileIOThread.getPriority(task.world, task.chunkX, task.chunkZ, task.type);
++ }
++
++ @Override
++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need
++ // to mark complete here to ensure we do not double complete
++ if (LoadDataPriorityHolder.this.markExecuting()) {
++ LoadDataPriorityHolder.this.task.complete(data, thr);
++ } // else: cancelled
++ };
++ task.dataLoadTask = RegionFileIOThread.loadDataAsync(
++ task.world, task.chunkX, task.chunkZ, task.type, consumer,
++ priority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), priority
++ );
++ if (this.isMarkedExecuted()) {
++ // if we are marked as completed, it could be:
++ // 1. we were cancelled
++ // 2. the consumer was completed
++ // in the 2nd case, cancel() does nothing
++ // in the 1st case, we ensure cancel() is called as it is possible for the cancelling thread
++ // to miss the field write here
++ task.dataLoadTask.cancel();
++ }
++ }
++
++ @Override
++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.lowerPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++
++ @Override
++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.setPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++
++ @Override
++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
++ final LoadDataFromDiskTask task = this.task;
++ RegionFileIOThread.raisePriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
++ }
++ }
++ */
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..9a4ec0f1fb3bac0e84e6bd3aaeb77f44e248aadb
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+@@ -0,0 +1,2071 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.completable.Completable;
++import ca.spottedleaf.concurrentutil.executor.Cancellable;
++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.google.gson.JsonArray;
++import com.google.gson.JsonElement;
++import com.google.gson.JsonObject;
++import com.google.gson.JsonPrimitive;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.WorldUtil;
++import io.papermc.paper.world.ChunkEntitySlices;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.ImposterProtoChunk;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.chunk.storage.ChunkSerializer;
++import net.minecraft.world.level.chunk.storage.EntityStorage;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.ArrayList;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.Objects;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.function.Consumer;
++
++public final class NewChunkHolder {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public static final Thread.UncaughtExceptionHandler CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER = new Thread.UncaughtExceptionHandler() {
++ @Override
++ public void uncaughtException(final Thread thread, final Throwable throwable) {
++ if (!(throwable instanceof ThreadDeath)) {
++ LOGGER.error("Uncaught exception in thread " + thread.getName(), throwable);
++ }
++ }
++ };
++
++ public final ServerLevel world;
++ public final int chunkX;
++ public final int chunkZ;
++
++ public final ChunkTaskScheduler scheduler;
++
++ // load/unload state
++
++ // chunk data state
++
++ private ChunkEntitySlices entityChunk;
++ // entity chunk that is loaded, but not yet deserialized
++ private CompoundTag pendingEntityChunk;
++
++ ChunkEntitySlices loadInEntityChunk(final boolean transientChunk) {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot sync load entity data off-main");
++ final CompoundTag entityChunk;
++ final ChunkEntitySlices ret;
++ this.scheduler.schedulingLock.lock();
++ try {
++ if (this.entityChunk != null && (transientChunk || !this.entityChunk.isTransient())) {
++ return this.entityChunk;
++ }
++ final CompoundTag pendingEntityChunk = this.pendingEntityChunk;
++ if (!transientChunk && pendingEntityChunk == null) {
++ throw new IllegalStateException("Must load entity data from disk before loading in the entity chunk!");
++ }
++
++ if (this.entityChunk == null) {
++ ret = this.entityChunk = new ChunkEntitySlices(
++ this.world, this.chunkX, this.chunkZ, this.getChunkStatus(),
++ WorldUtil.getMinSection(this.world), WorldUtil.getMaxSection(this.world)
++ );
++
++ ret.setTransient(transientChunk);
++
++ this.world.getEntityLookup().entitySectionLoad(this.chunkX, this.chunkZ, ret);
++ } else {
++ // transientChunk = false here
++ ret = this.entityChunk;
++ this.entityChunk.setTransient(false);
++ }
++
++ if (!transientChunk) {
++ this.pendingEntityChunk = null;
++ entityChunk = pendingEntityChunk == EMPTY_ENTITY_CHUNK ? null : pendingEntityChunk;
++ } else {
++ entityChunk = null;
++ }
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (!transientChunk) {
++ if (entityChunk != null) {
++ final List entities = EntityStorage.readEntities(this.world, entityChunk);
++
++ this.world.getEntityLookup().addEntityChunkEntities(entities);
++ }
++ }
++
++ return ret;
++ }
++
++ // needed to distinguish whether the entity chunk has been read from disk but is empty or whether it has _not_
++ // been read from disk
++ private static final CompoundTag EMPTY_ENTITY_CHUNK = new CompoundTag();
++
++ private ChunkLoadTask.EntityDataLoadTask entityDataLoadTask;
++ // note: if entityDataLoadTask is cancelled, but on its completion entityDataLoadTaskWaiters.size() != 0,
++ // then the task is rescheduled
++ private List entityDataLoadTaskWaiters;
++
++ public ChunkLoadTask.EntityDataLoadTask getEntityDataLoadTask() {
++ return this.entityDataLoadTask;
++ }
++
++ // must hold schedule lock for the two below functions
++
++ // returns only if the data has been loaded from disk, DOES NOT relate to whether it has been deserialized
++ // or added into the world (or even into entityChunk)
++ public boolean isEntityChunkNBTLoaded() {
++ return (this.entityChunk != null && !this.entityChunk.isTransient()) || this.pendingEntityChunk != null;
++ }
++
++ private void completeEntityLoad(final GenericDataLoadTask.TaskResult result) {
++ final List completeWaiters;
++ ChunkLoadTask.EntityDataLoadTask entityDataLoadTask = null;
++ boolean scheduleEntityTask = false;
++ this.scheduler.schedulingLock.lock();
++ try {
++ final List waiters = this.entityDataLoadTaskWaiters;
++ this.entityDataLoadTask = null;
++ if (result != null) {
++ this.entityDataLoadTaskWaiters = null;
++ this.pendingEntityChunk = result.left() == null ? EMPTY_ENTITY_CHUNK : result.left();
++ if (result.right() != null) {
++ LOGGER.error("Unhandled entity data load exception, data data will be lost: ", result.right());
++ }
++
++ completeWaiters = waiters;
++ } else {
++ // cancelled
++ completeWaiters = null;
++
++ // need to re-schedule?
++ if (waiters.isEmpty()) {
++ this.entityDataLoadTaskWaiters = null;
++ // no tasks to schedule _for_
++ } else {
++ entityDataLoadTask = this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask(
++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
++ );
++ entityDataLoadTask.addCallback(this::completeEntityLoad);
++ // need one schedule() per waiter
++ for (final GenericDataLoadTaskCallback callback : waiters) {
++ scheduleEntityTask |= entityDataLoadTask.schedule(true);
++ }
++ }
++ }
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (scheduleEntityTask) {
++ entityDataLoadTask.scheduleNow();
++ }
++
++ // avoid holding the scheduling lock while completing
++ if (completeWaiters != null) {
++ for (final GenericDataLoadTaskCallback callback : completeWaiters) {
++ callback.accept(result);
++ }
++ }
++
++ this.scheduler.schedulingLock.lock();
++ try {
++ this.checkUnload();
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ }
++
++ // note: it is guaranteed that the consumer cannot be called for the entirety that the schedule lock is held
++ // however, when the consumer is invoked, it will hold the schedule lock
++ public GenericDataLoadTaskCallback getOrLoadEntityData(final Consumer> consumer) {
++ if (this.isEntityChunkNBTLoaded()) {
++ throw new IllegalStateException("Cannot load entity data, it is already loaded");
++ }
++ // why not just acquire the lock? because the caller NEEDS to call isEntityChunkNBTLoaded before this!
++ if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold scheduling lock");
++ }
++
++ final GenericDataLoadTaskCallback ret = new EntityDataLoadTaskCallback((Consumer)consumer, this);
++
++ if (this.entityDataLoadTask == null) {
++ this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask(
++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
++ );
++ this.entityDataLoadTask.addCallback(this::completeEntityLoad);
++ this.entityDataLoadTaskWaiters = new ArrayList<>();
++ }
++ this.entityDataLoadTaskWaiters.add(ret);
++ if (this.entityDataLoadTask.schedule(true)) {
++ ret.schedule = this.entityDataLoadTask;
++ }
++ this.checkUnload();
++
++ return ret;
++ }
++
++ private static final class EntityDataLoadTaskCallback extends GenericDataLoadTaskCallback {
++
++ public EntityDataLoadTaskCallback(final Consumer> consumer, final NewChunkHolder chunkHolder) {
++ super(consumer, chunkHolder);
++ }
++
++ @Override
++ void internalCancel() {
++ this.chunkHolder.entityDataLoadTaskWaiters.remove(this);
++ this.chunkHolder.entityDataLoadTask.cancel();
++ }
++ }
++
++ private PoiChunk poiChunk;
++
++ private ChunkLoadTask.PoiDataLoadTask poiDataLoadTask;
++ // note: if entityDataLoadTask is cancelled, but on its completion entityDataLoadTaskWaiters.size() != 0,
++ // then the task is rescheduled
++ private List poiDataLoadTaskWaiters;
++
++ public ChunkLoadTask.PoiDataLoadTask getPoiDataLoadTask() {
++ return this.poiDataLoadTask;
++ }
++
++ // must hold schedule lock for the two below functions
++
++ public boolean isPoiChunkLoaded() {
++ return this.poiChunk != null;
++ }
++
++ private void completePoiLoad(final GenericDataLoadTask.TaskResult result) {
++ final List completeWaiters;
++ ChunkLoadTask.PoiDataLoadTask poiDataLoadTask = null;
++ boolean schedulePoiTask = false;
++ this.scheduler.schedulingLock.lock();
++ try {
++ final List waiters = this.poiDataLoadTaskWaiters;
++ this.poiDataLoadTask = null;
++ if (result != null) {
++ this.poiDataLoadTaskWaiters = null;
++ this.poiChunk = result.left();
++ if (result.right() != null) {
++ LOGGER.error("Unhandled poi load exception, poi data will be lost: ", result.right());
++ }
++
++ completeWaiters = waiters;
++ } else {
++ // cancelled
++ completeWaiters = null;
++
++ // need to re-schedule?
++ if (waiters.isEmpty()) {
++ this.poiDataLoadTaskWaiters = null;
++ // no tasks to schedule _for_
++ } else {
++ poiDataLoadTask = this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask(
++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
++ );
++ poiDataLoadTask.addCallback(this::completePoiLoad);
++ // need one schedule() per waiter
++ for (final GenericDataLoadTaskCallback callback : waiters) {
++ schedulePoiTask |= poiDataLoadTask.schedule(true);
++ }
++ }
++ }
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++
++ if (schedulePoiTask) {
++ poiDataLoadTask.scheduleNow();
++ }
++
++ // avoid holding the scheduling lock while completing
++ if (completeWaiters != null) {
++ for (final GenericDataLoadTaskCallback callback : completeWaiters) {
++ callback.accept(result);
++ }
++ }
++ this.scheduler.schedulingLock.lock();
++ try {
++ this.checkUnload();
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ }
++
++ // note: it is guaranteed that the consumer cannot be called for the entirety that the schedule lock is held
++ // however, when the consumer is invoked, it will hold the schedule lock
++ public GenericDataLoadTaskCallback getOrLoadPoiData(final Consumer> consumer) {
++ if (this.isPoiChunkLoaded()) {
++ throw new IllegalStateException("Cannot load poi data, it is already loaded");
++ }
++ // why not just acquire the lock? because the caller NEEDS to call isPoiChunkLoaded before this!
++ if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold scheduling lock");
++ }
++
++ final GenericDataLoadTaskCallback ret = new PoiDataLoadTaskCallback((Consumer)consumer, this);
++
++ if (this.poiDataLoadTask == null) {
++ this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask(
++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
++ );
++ this.poiDataLoadTask.addCallback(this::completePoiLoad);
++ this.poiDataLoadTaskWaiters = new ArrayList<>();
++ }
++ this.poiDataLoadTaskWaiters.add(ret);
++ if (this.poiDataLoadTask.schedule(true)) {
++ ret.schedule = this.poiDataLoadTask;
++ }
++ this.checkUnload();
++
++ return ret;
++ }
++
++ private static final class PoiDataLoadTaskCallback extends GenericDataLoadTaskCallback {
++
++ public PoiDataLoadTaskCallback(final Consumer> consumer, final NewChunkHolder chunkHolder) {
++ super(consumer, chunkHolder);
++ }
++
++ @Override
++ void internalCancel() {
++ this.chunkHolder.poiDataLoadTaskWaiters.remove(this);
++ this.chunkHolder.poiDataLoadTask.cancel();
++ }
++ }
++
++ public static abstract class GenericDataLoadTaskCallback implements Cancellable, Consumer> {
++
++ protected final Consumer> consumer;
++ protected final NewChunkHolder chunkHolder;
++ protected boolean completed;
++ protected GenericDataLoadTask, ?> schedule;
++ protected final AtomicBoolean scheduled = new AtomicBoolean();
++
++ public GenericDataLoadTaskCallback(final Consumer> consumer,
++ final NewChunkHolder chunkHolder) {
++ this.consumer = consumer;
++ this.chunkHolder = chunkHolder;
++ }
++
++ public void schedule() {
++ if (this.scheduled.getAndSet(true)) {
++ throw new IllegalStateException("Double calling schedule()");
++ }
++ if (this.schedule != null) {
++ this.schedule.scheduleNow();
++ this.schedule = null;
++ }
++ }
++
++ boolean isCompleted() {
++ return this.completed;
++ }
++
++ // must hold scheduling lock
++ private boolean setCompleted() {
++ if (this.completed) {
++ return false;
++ }
++ return this.completed = true;
++ }
++
++ @Override
++ public void accept(final GenericDataLoadTask.TaskResult, Throwable> result) {
++ if (result != null) {
++ if (this.setCompleted()) {
++ this.consumer.accept(result);
++ } else {
++ throw new IllegalStateException("Cannot be cancelled at this point");
++ }
++ } else {
++ throw new NullPointerException("Result cannot be null (cancelled)");
++ }
++ }
++
++ // holds scheduling lock
++ abstract void internalCancel();
++
++ @Override
++ public boolean cancel() {
++ this.chunkHolder.scheduler.schedulingLock.lock();
++ try {
++ if (!this.completed) {
++ this.completed = true;
++ this.internalCancel();
++ return true;
++ }
++ return false;
++ } finally {
++ this.chunkHolder.scheduler.schedulingLock.unlock();
++ }
++ }
++ }
++
++ private ChunkAccess currentChunk;
++
++ // generation status state
++
++ /**
++ * Current status the chunk has been brought up to by the chunk system. null indicates no work at all
++ */
++ private ChunkStatus currentGenStatus;
++
++ // This allows unsynchronised access to the chunk and last gen status
++ private volatile ChunkCompletion lastChunkCompletion;
++
++ public ChunkCompletion getLastChunkCompletion() {
++ return this.lastChunkCompletion;
++ }
++
++ public static final record ChunkCompletion(ChunkAccess chunk, ChunkStatus genStatus) {};
++
++ /**
++ * The target final chunk status the chunk system will bring the chunk to.
++ */
++ private ChunkStatus requestedGenStatus;
++
++ private ChunkProgressionTask generationTask;
++ private ChunkStatus generationTaskStatus;
++
++ /**
++ * contains the neighbours that this chunk generation is blocking on
++ */
++ protected final ReferenceLinkedOpenHashSet neighboursBlockingGenTask = new ReferenceLinkedOpenHashSet<>(4);
++
++ /**
++ * map of ChunkHolder -> Required Status for this chunk
++ */
++ protected final Reference2ObjectLinkedOpenHashMap neighboursWaitingForUs = new Reference2ObjectLinkedOpenHashMap<>();
++
++ public void addGenerationBlockingNeighbour(final NewChunkHolder neighbour) {
++ this.neighboursBlockingGenTask.add(neighbour);
++ }
++
++ public void addWaitingNeighbour(final NewChunkHolder neighbour, final ChunkStatus requiredStatus) {
++ final boolean wasEmpty = this.neighboursWaitingForUs.isEmpty();
++ this.neighboursWaitingForUs.put(neighbour, requiredStatus);
++ if (wasEmpty) {
++ this.checkUnload();
++ }
++ }
++
++ // priority state
++
++ // the target priority for this chunk to generate at
++ // TODO this will screw over scheduling at lower priorities to neighbours, fix
++ private PrioritisedExecutor.Priority priority = PrioritisedExecutor.Priority.NORMAL;
++ private boolean priorityLocked;
++
++ // the priority neighbouring chunks have requested this chunk generate at
++ private PrioritisedExecutor.Priority neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE;
++
++ public PrioritisedExecutor.Priority getEffectivePriority() {
++ return PrioritisedExecutor.Priority.max(this.priority, this.neighbourRequestedPriority);
++ }
++
++ protected void recalculateNeighbourRequestedPriority() {
++ if (this.neighboursWaitingForUs.isEmpty()) {
++ this.neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE;
++ return;
++ }
++
++ PrioritisedExecutor.Priority max = PrioritisedExecutor.Priority.IDLE;
++
++ for (final NewChunkHolder holder : this.neighboursWaitingForUs.keySet()) {
++ final PrioritisedExecutor.Priority neighbourPriority = holder.getEffectivePriority();
++ if (neighbourPriority.isHigherPriority(max)) {
++ max = neighbourPriority;
++ }
++ }
++
++ final PrioritisedExecutor.Priority current = this.getEffectivePriority();
++ this.neighbourRequestedPriority = max;
++ final PrioritisedExecutor.Priority next = this.getEffectivePriority();
++
++ if (current == next) {
++ return;
++ }
++
++ // our effective priority has changed, so change our task
++ if (this.generationTask != null) {
++ this.generationTask.setPriority(next);
++ }
++
++ // now propagate this to our neighbours
++ this.recalculateNeighbourPriorities();
++ }
++
++ public void recalculateNeighbourPriorities() {
++ for (final NewChunkHolder holder : this.neighboursBlockingGenTask) {
++ holder.recalculateNeighbourRequestedPriority();
++ }
++ }
++
++ // must hold scheduling lock
++ public void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (this.priority != null && this.priority.isHigherOrEqualPriority(priority)) {
++ return;
++ }
++ this.setPriority(priority);
++ }
++
++ private void lockPriority() {
++ this.priority = PrioritisedExecutor.Priority.NORMAL;
++ this.priorityLocked = true;
++ }
++
++ // must hold scheduling lock
++ public void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (this.priorityLocked) {
++ return;
++ }
++ final PrioritisedExecutor.Priority old = this.getEffectivePriority();
++ this.priority = priority;
++ final PrioritisedExecutor.Priority newPriority = this.getEffectivePriority();
++
++ if (old != newPriority) {
++ if (this.generationTask != null) {
++ this.generationTask.setPriority(newPriority);
++ }
++ }
++
++ this.recalculateNeighbourPriorities();
++ }
++
++ // must hold scheduling lock
++ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (this.priority != null && this.priority.isLowerOrEqualPriority(priority)) {
++ return;
++ }
++ this.setPriority(priority);
++ }
++
++ // error handling state
++ private ChunkStatus failedGenStatus;
++ private Throwable genTaskException;
++ private Thread genTaskFailedThread;
++
++ private boolean failedLightUpdate;
++
++ public void failedLightUpdate() {
++ this.failedLightUpdate = true;
++ }
++
++ public boolean hasFailedGeneration() {
++ return this.genTaskException != null;
++ }
++
++ // ticket level state
++ private int oldTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1;
++ private int currentTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1;
++
++ public int getTicketLevel() {
++ return this.currentTicketLevel;
++ }
++
++ public final ChunkHolder vanillaChunkHolder;
++
++ public NewChunkHolder(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkTaskScheduler scheduler) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.scheduler = scheduler;
++ this.vanillaChunkHolder = new ChunkHolder(new ChunkPos(chunkX, chunkZ), world, world.getLightEngine(), world.chunkSource.chunkMap, this);
++ }
++
++ protected ImposterProtoChunk wrappedChunkForNeighbour;
++
++ // holds scheduling lock
++ public ChunkAccess getChunkForNeighbourAccess() {
++ // Vanilla overrides the status futures with an imposter chunk to prevent writes to full chunks
++ // But we don't store per-status futures, so we need this hack
++ if (this.wrappedChunkForNeighbour != null) {
++ return this.wrappedChunkForNeighbour;
++ }
++ final ChunkAccess ret = this.currentChunk;
++ return ret instanceof LevelChunk fullChunk ? this.wrappedChunkForNeighbour = new ImposterProtoChunk(fullChunk, false) : ret;
++ }
++
++ public ChunkAccess getCurrentChunk() {
++ return this.currentChunk;
++ }
++
++ int getCurrentTicketLevel() {
++ return this.currentTicketLevel;
++ }
++
++ void updateTicketLevel(final int toLevel) {
++ this.currentTicketLevel = toLevel;
++ }
++
++ private int totalNeighboursUsingThisChunk = 0;
++
++ // holds schedule lock
++ public void addNeighbourUsingChunk() {
++ final int now = ++this.totalNeighboursUsingThisChunk;
++
++ if (now == 1) {
++ this.checkUnload();
++ }
++ }
++
++ // holds schedule lock
++ public void removeNeighbourUsingChunk() {
++ final int now = --this.totalNeighboursUsingThisChunk;
++
++ if (now == 0) {
++ this.checkUnload();
++ }
++
++ if (now < 0) {
++ throw new IllegalStateException("Neighbours using this chunk cannot be negative");
++ }
++ }
++
++ // must hold scheduling lock
++ // returns string reason for why chunk should remain loaded, null otherwise
++ public final String isSafeToUnload() {
++ // is ticket level below threshold?
++ if (this.oldTicketLevel <= ChunkHolderManager.MAX_TICKET_LEVEL) {
++ return "ticket_level";
++ }
++
++ // are we being used by another chunk for generation?
++ if (this.totalNeighboursUsingThisChunk != 0) {
++ return "neighbours_generating";
++ }
++
++ // are we going to be used by another chunk for generation?
++ if (!this.neighboursWaitingForUs.isEmpty()) {
++ return "neighbours_waiting";
++ }
++
++ // chunk must be marked inaccessible (i.e unloaded to plugins)
++ if (this.getChunkStatus() != ChunkHolder.FullChunkStatus.INACCESSIBLE) {
++ return "fullchunkstatus";
++ }
++
++ // are we currently generating anything, or have requested generation?
++ if (this.generationTask != null) {
++ return "generating";
++ }
++ if (this.requestedGenStatus != null) {
++ return "requested_generation";
++ }
++
++ // entity data requested?
++ if (this.entityDataLoadTask != null) {
++ return "entity_data_requested";
++ }
++
++ // poi data requested?
++ if (this.poiDataLoadTask != null) {
++ return "poi_data_requested";
++ }
++
++ // are we pending serialization?
++ if (this.entityDataUnload != null) {
++ return "entity_serialization";
++ }
++ if (this.poiDataUnload != null) {
++ return "poi_serialization";
++ }
++ if (this.chunkDataUnload != null) {
++ return "chunk_serialization";
++ }
++
++ // Note: light tasks do not need a check, as they add a ticket.
++
++ // nothing is using this chunk, so it should be unloaded
++ return null;
++ }
++
++ /** Unloaded from chunk map */
++ boolean killed;
++
++ // must hold scheduling lock
++ private void checkUnload() {
++ if (this.killed) {
++ return;
++ }
++ if (this.isSafeToUnload() == null) {
++ // ensure in unload queue
++ this.scheduler.chunkHolderManager.unloadQueue.add(this);
++ } else {
++ // ensure not in unload queue
++ this.scheduler.chunkHolderManager.unloadQueue.remove(this);
++ }
++ }
++
++ static final record UnloadState(NewChunkHolder holder, ChunkAccess chunk, ChunkEntitySlices entityChunk, PoiChunk poiChunk) {};
++
++ // note: these are completed with null to indicate that no write occurred
++ // they are also completed with null to indicate a null write occurred
++ private UnloadTask chunkDataUnload;
++ private UnloadTask entityDataUnload;
++ private UnloadTask poiDataUnload;
++
++ public static final record UnloadTask(Completable completable, DelayedPrioritisedTask task) {}
++
++ public UnloadTask getUnloadTask(final RegionFileIOThread.RegionFileType type) {
++ switch (type) {
++ case CHUNK_DATA:
++ return this.chunkDataUnload;
++ case ENTITY_DATA:
++ return this.entityDataUnload;
++ case POI_DATA:
++ return this.poiDataUnload;
++ default:
++ throw new IllegalStateException("Unknown regionfile type " + type);
++ }
++ }
++
++ private UnloadState unloadState;
++
++ // holds schedule lock
++ UnloadState unloadStage1() {
++ // because we hold the scheduling lock, we cannot actually unload anything
++ // so we need to null this chunk's state
++ ChunkAccess chunk = this.currentChunk;
++ ChunkEntitySlices entityChunk = this.entityChunk;
++ PoiChunk poiChunk = this.poiChunk;
++ // chunk state
++ this.currentChunk = null;
++ this.currentGenStatus = null;
++ this.wrappedChunkForNeighbour = null;
++ this.lastChunkCompletion = null;
++ // entity chunk state
++ this.entityChunk = null;
++ this.pendingEntityChunk = null;
++
++ // poi chunk state
++ this.poiChunk = null;
++
++ // priority state
++ this.priorityLocked = false;
++
++ if (chunk != null) {
++ this.chunkDataUnload = new UnloadTask(new Completable<>(), new DelayedPrioritisedTask(PrioritisedExecutor.Priority.NORMAL));
++ }
++ if (poiChunk != null) {
++ this.poiDataUnload = new UnloadTask(new Completable<>(), null);
++ }
++ if (entityChunk != null) {
++ this.entityDataUnload = new UnloadTask(new Completable<>(), null);
++ }
++
++ return this.unloadState = (chunk != null || entityChunk != null || poiChunk != null) ? new UnloadState(this, chunk, entityChunk, poiChunk) : null;
++ }
++
++ // data is null if failed or does not need to be saved
++ void completeAsyncChunkDataSave(final CompoundTag data) {
++ if (data != null) {
++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, data, RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ this.chunkDataUnload.completable().complete(data);
++ this.scheduler.schedulingLock.lock();
++ try {
++ // can only write to these fields while holding the schedule lock
++ this.chunkDataUnload = null;
++ this.checkUnload();
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ }
++
++ void unloadStage2(final UnloadState state) {
++ this.unloadState = null;
++ final ChunkAccess chunk = state.chunk();
++ final ChunkEntitySlices entityChunk = state.entityChunk();
++ final PoiChunk poiChunk = state.poiChunk();
++
++ final boolean shouldLevelChunkNotSave = (chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave);
++
++ // unload chunk data
++ if (chunk != null) {
++ if (chunk instanceof LevelChunk levelChunk) {
++ levelChunk.setLoaded(false);
++ }
++
++ if (!shouldLevelChunkNotSave) {
++ this.saveChunk(chunk, true);
++ } else {
++ this.completeAsyncChunkDataSave(null);
++ }
++
++ if (chunk instanceof LevelChunk levelChunk) {
++ this.world.unload(levelChunk);
++ }
++ }
++
++ // unload entity data
++ if (entityChunk != null) {
++ this.saveEntities(entityChunk, true);
++ // yes this is a hack to pass the compound tag through...
++ final CompoundTag lastEntityUnload = this.lastEntityUnload;
++ this.lastEntityUnload = null;
++
++ if (entityChunk.unload()) {
++ this.scheduler.schedulingLock.lock();
++ try {
++ entityChunk.setTransient(true);
++ this.entityChunk = entityChunk;
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ } else {
++ this.world.getEntityLookup().entitySectionUnload(this.chunkX, this.chunkZ);
++ }
++ // we need to delay the callback until after determining transience, otherwise a potential loader could
++ // set entityChunk before we do
++ this.entityDataUnload.completable().complete(lastEntityUnload);
++ }
++
++ // unload poi data
++ if (poiChunk != null) {
++ if (poiChunk.isDirty() && !shouldLevelChunkNotSave) {
++ this.savePOI(poiChunk, true);
++ } else {
++ this.poiDataUnload.completable().complete(null);
++ }
++
++ if (poiChunk.isLoaded()) {
++ this.world.getPoiManager().onUnload(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ));
++ }
++ }
++ }
++
++ boolean unloadStage3() {
++ // can only write to these while holding the schedule lock, and we instantly complete them in stage2
++ this.poiDataUnload = null;
++ this.entityDataUnload = null;
++
++ // we need to check if anything has been loaded in the meantime (or if we have transient entities)
++ if (this.entityChunk != null || this.poiChunk != null || this.currentChunk != null) {
++ return false;
++ }
++
++ return this.isSafeToUnload() == null;
++ }
++
++ private void cancelGenTask() {
++ if (this.generationTask != null) {
++ this.generationTask.cancel();
++ } else {
++ // otherwise, we are blocking on neighbours, so remove them
++ if (!this.neighboursBlockingGenTask.isEmpty()) {
++ for (final NewChunkHolder neighbour : this.neighboursBlockingGenTask) {
++ if (neighbour.neighboursWaitingForUs.remove(this) == null) {
++ throw new IllegalStateException("Corrupt state");
++ }
++ if (neighbour.neighboursWaitingForUs.isEmpty()) {
++ neighbour.checkUnload();
++ }
++ }
++ this.neighboursBlockingGenTask.clear();
++ this.checkUnload();
++ }
++ }
++ }
++
++ // holds: ticket level update lock
++ // holds: schedule lock
++ public void processTicketLevelUpdate(final List scheduledTasks, final List changedLoadStatus) {
++ final int oldLevel = this.oldTicketLevel;
++ final int newLevel = this.currentTicketLevel;
++
++ if (oldLevel == newLevel) {
++ return;
++ }
++
++ this.oldTicketLevel = newLevel;
++
++ final ChunkHolder.FullChunkStatus oldState = ChunkHolder.getFullChunkStatus(oldLevel);
++ final ChunkHolder.FullChunkStatus newState = ChunkHolder.getFullChunkStatus(newLevel);
++ final boolean oldUnloaded = oldLevel > ChunkHolderManager.MAX_TICKET_LEVEL;
++ final boolean newUnloaded = newLevel > ChunkHolderManager.MAX_TICKET_LEVEL;
++
++ final ChunkStatus maxGenerationStatusOld = ChunkHolder.getStatus(oldLevel);
++ final ChunkStatus maxGenerationStatusNew = ChunkHolder.getStatus(newLevel);
++
++ // check for cancellations from downgrading ticket level
++ if (this.requestedGenStatus != null && !newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newLevel > oldLevel) {
++ // note: cancel() may invoke onChunkGenComplete synchronously here
++ if (newUnloaded) {
++ // need to cancel all tasks
++ // note: requested status must be set to null here before cancellation, to indicate to the
++ // completion logic that we do not want rescheduling to occur
++ this.requestedGenStatus = null;
++ this.cancelGenTask();
++ } else {
++ final ChunkStatus toCancel = maxGenerationStatusNew.getNextStatus();
++ final ChunkStatus currentRequestedStatus = this.requestedGenStatus;
++
++ if (currentRequestedStatus.isOrAfter(toCancel)) {
++ // we do have to cancel something here
++ // clamp requested status to the maximum
++ if (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(maxGenerationStatusNew)) {
++ // already generated to status, so we must cancel
++ this.requestedGenStatus = null;
++ this.cancelGenTask();
++ } else {
++ // not generated to status, so we may have to cancel
++ // note: gen task is always 1 status above current gen status if not null
++ this.requestedGenStatus = maxGenerationStatusNew;
++ if (this.generationTaskStatus != null && this.generationTaskStatus.isOrAfter(toCancel)) {
++ // TOOD is this even possible? i don't think so
++ throw new IllegalStateException("?????");
++ }
++ }
++ }
++ }
++ }
++
++ if (newState != oldState) {
++ if (newState.isOrAfter(oldState)) {
++ // status upgrade
++ if (!oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ // may need to schedule full load
++ if (this.currentGenStatus != ChunkStatus.FULL) {
++ if (this.requestedGenStatus != null) {
++ this.requestedGenStatus = ChunkStatus.FULL;
++ } else {
++ this.scheduler.schedule(
++ this.chunkX, this.chunkZ, ChunkStatus.FULL, this, scheduledTasks
++ );
++ }
++ } else {
++ // now we are fully loaded
++ this.queueBorderFullStatus(true, changedLoadStatus);
++ }
++ }
++ } else {
++ // status downgrade
++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, null);
++ }
++
++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, null);
++ }
++
++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, null);
++ }
++ }
++ }
++
++ if (oldState != newState) {
++ if (this.onTicketUpdate(oldState, newState)) {
++ changedLoadStatus.add(this);
++ }
++ }
++
++ if (oldUnloaded != newUnloaded) {
++ this.checkUnload();
++ }
++ }
++
++ /*
++ For full chunks, vanilla just loads chunks around it up to FEATURES, 1 radius
++
++ For ticking chunks, it updates the persistent entity manager (soon to be completely nuked by EntitySliceManager, which
++ will also need to be updated but with far less implications)
++ It also shoves the scheduled block ticks into the tick scheduler
++
++ For entity ticking chunks, updates the entity manager (see above)
++ */
++
++ static final int NEIGHBOUR_RADIUS = 2;
++ private long fullNeighbourChunksLoadedBitset;
++
++ private static int getFullNeighbourIndex(final int relativeX, final int relativeZ) {
++ // index = (relativeX + NEIGHBOUR_CACHE_RADIUS) + (relativeZ + NEIGHBOUR_CACHE_RADIUS) * (NEIGHBOUR_CACHE_RADIUS * 2 + 1)
++ // optimised variant of the above by moving some of the ops to compile time
++ return relativeX + (relativeZ * (NEIGHBOUR_RADIUS * 2 + 1)) + (NEIGHBOUR_RADIUS + NEIGHBOUR_RADIUS * ((NEIGHBOUR_RADIUS * 2 + 1)));
++ }
++ public final boolean isNeighbourFullLoaded(final int relativeX, final int relativeZ) {
++ return (this.fullNeighbourChunksLoadedBitset & (1L << getFullNeighbourIndex(relativeX, relativeZ))) != 0;
++ }
++
++ // returns true if this chunk changed full status
++ public final boolean setNeighbourFullLoaded(final int relativeX, final int relativeZ) {
++ final long before = this.fullNeighbourChunksLoadedBitset;
++ final int index = getFullNeighbourIndex(relativeX, relativeZ);
++ this.fullNeighbourChunksLoadedBitset |= (1L << index);
++ return this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset);
++ }
++
++ // returns true if this chunk changed full status
++ public final boolean setNeighbourFullUnloaded(final int relativeX, final int relativeZ) {
++ final long before = this.fullNeighbourChunksLoadedBitset;
++ final int index = getFullNeighbourIndex(relativeX, relativeZ);
++ this.fullNeighbourChunksLoadedBitset &= ~(1L << index);
++ return this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset);
++ }
++
++ public static boolean areNeighboursFullLoaded(final long bitset, final int radius) {
++ // index = relativeX + (relativeZ * (NEIGHBOUR_CACHE_RADIUS * 2 + 1)) + (NEIGHBOUR_CACHE_RADIUS + NEIGHBOUR_CACHE_RADIUS * ((NEIGHBOUR_CACHE_RADIUS * 2 + 1)))
++ switch (radius) {
++ case 0: {
++ return (bitset & (1L << getFullNeighbourIndex(0, 0))) != 0L;
++ }
++ case 1: {
++ long mask = 0L;
++ for (int dx = -1; dx <= 1; ++dx) {
++ for (int dz = -1; dz <= 1; ++dz) {
++ mask |= (1L << getFullNeighbourIndex(dx, dz));
++ }
++ }
++ return (bitset & mask) == mask;
++ }
++ case 2: {
++ long mask = 0L;
++ for (int dx = -2; dx <= 2; ++dx) {
++ for (int dz = -2; dz <= 2; ++dz) {
++ mask |= (1L << getFullNeighbourIndex(dx, dz));
++ }
++ }
++ return (bitset & mask) == mask;
++ }
++
++ default: {
++ throw new IllegalArgumentException("Radius not recognized: " + radius);
++ }
++ }
++ }
++
++ // upper 16 bits are pending status, lower 16 bits are current status
++ private volatile long chunkStatus;
++ private static final long PENDING_STATUS_MASK = Long.MIN_VALUE >> 31;
++ private static final ChunkHolder.FullChunkStatus[] CHUNK_STATUS_BY_ID = ChunkHolder.FullChunkStatus.values();
++ private static final VarHandle CHUNK_STATUS_HANDLE = ConcurrentUtil.getVarHandle(NewChunkHolder.class, "chunkStatus", long.class);
++
++ public static ChunkHolder.FullChunkStatus getCurrentChunkStatus(final long encoded) {
++ return CHUNK_STATUS_BY_ID[(int)encoded];
++ }
++
++ public static ChunkHolder.FullChunkStatus getPendingChunkStatus(final long encoded) {
++ return CHUNK_STATUS_BY_ID[(int)(encoded >>> 32)];
++ }
++
++ public ChunkHolder.FullChunkStatus getChunkStatus() {
++ return getCurrentChunkStatus(((long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this)));
++ }
++
++ public boolean isEntityTickingReady() {
++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
++ }
++
++ public boolean isTickingReady() {
++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.TICKING);
++ }
++
++ public boolean isFullChunkReady() {
++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER);
++ }
++
++ private static ChunkHolder.FullChunkStatus getStatusForBitset(final long bitset) {
++ if (areNeighboursFullLoaded(bitset, 2)) {
++ return ChunkHolder.FullChunkStatus.ENTITY_TICKING;
++ } else if (areNeighboursFullLoaded(bitset, 1)) {
++ return ChunkHolder.FullChunkStatus.TICKING;
++ } else if (areNeighboursFullLoaded(bitset, 0)) {
++ return ChunkHolder.FullChunkStatus.BORDER;
++ } else {
++ return ChunkHolder.FullChunkStatus.INACCESSIBLE;
++ }
++ }
++
++ // note: only while updating ticket level, so holds ticket update lock + scheduling lock
++ protected final boolean onTicketUpdate(final ChunkHolder.FullChunkStatus oldState, final ChunkHolder.FullChunkStatus newState) {
++ if (oldState == newState) {
++ return false;
++ }
++
++ // preserve border request after full status complete, as it does not set anything in the bitset
++ ChunkHolder.FullChunkStatus byNeighbours = getStatusForBitset(this.fullNeighbourChunksLoadedBitset);
++ if (byNeighbours == ChunkHolder.FullChunkStatus.INACCESSIBLE && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) {
++ byNeighbours = ChunkHolder.FullChunkStatus.BORDER;
++ }
++
++ final ChunkHolder.FullChunkStatus toSet;
++
++ if (newState.isOrAfter(byNeighbours)) {
++ // must clamp to neighbours level, even though we have the ticket level
++ toSet = byNeighbours;
++ } else {
++ // must clamp to ticket level, even though we have the neighbours
++ toSet = newState;
++ }
++
++ long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);
++
++ if (curr == ((long)toSet.ordinal() | ((long)toSet.ordinal() << 32))) {
++ // nothing to do
++ return false;
++ }
++
++ int failures = 0;
++ for (;;) {
++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toSet.ordinal() << 32);
++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
++ return true;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ protected final boolean onNeighbourChange(final long bitsetBefore, final long bitsetAfter) {
++ ChunkHolder.FullChunkStatus oldState = getStatusForBitset(bitsetBefore);
++ ChunkHolder.FullChunkStatus newState = getStatusForBitset(bitsetAfter);
++ final ChunkHolder.FullChunkStatus currStateTicketLevel = ChunkHolder.getFullChunkStatus(this.oldTicketLevel);
++ if (oldState.isOrAfter(currStateTicketLevel)) {
++ oldState = currStateTicketLevel;
++ }
++ if (newState.isOrAfter(currStateTicketLevel)) {
++ newState = currStateTicketLevel;
++ }
++ // preserve border request after full status complete, as it does not set anything in the bitset
++ if (newState == ChunkHolder.FullChunkStatus.INACCESSIBLE && currStateTicketLevel.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) {
++ newState = ChunkHolder.FullChunkStatus.BORDER;
++ }
++
++ if (oldState == newState) {
++ return false;
++ }
++
++ int failures = 0;
++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)newState.ordinal() << 32);
++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
++ return true;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ private boolean queueBorderFullStatus(final boolean loaded, final List changedFullStatus) {
++ final ChunkHolder.FullChunkStatus toStatus = loaded ? ChunkHolder.FullChunkStatus.BORDER : ChunkHolder.FullChunkStatus.INACCESSIBLE;
++
++ int failures = 0;
++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
++ final ChunkHolder.FullChunkStatus currPending = getPendingChunkStatus(curr);
++ if (loaded && currPending != ChunkHolder.FullChunkStatus.INACCESSIBLE) {
++ throw new IllegalStateException("Expected " + ChunkHolder.FullChunkStatus.INACCESSIBLE + " for pending, but got " + currPending);
++ }
++
++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toStatus.ordinal() << 32);
++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
++ if ((int)(update) != (int)(update >>> 32)) {
++ changedFullStatus.add(this);
++ return true;
++ }
++ return false;
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ // only call on main thread, must hold ticket level and scheduling lock
++ private void onFullChunkLoadChange(final boolean loaded, final List changedFullStatus) {
++ for (int dz = -NEIGHBOUR_RADIUS; dz <= NEIGHBOUR_RADIUS; ++dz) {
++ for (int dx = -NEIGHBOUR_RADIUS; dx <= NEIGHBOUR_RADIUS; ++dx) {
++ final NewChunkHolder holder = (dx | dz) == 0 ? this : this.scheduler.chunkHolderManager.getChunkHolder(dx + this.chunkX, dz + this.chunkZ);
++ if (loaded) {
++ if (holder.setNeighbourFullLoaded(-dx, -dz)) {
++ changedFullStatus.add(holder);
++ }
++ } else {
++ if (holder != null && holder.setNeighbourFullUnloaded(-dx, -dz)) {
++ changedFullStatus.add(holder);
++ }
++ }
++ }
++ }
++ }
++
++ private ChunkHolder.FullChunkStatus updateCurrentState(final ChunkHolder.FullChunkStatus to) {
++ int failures = 0;
++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
++ final long update = (curr & PENDING_STATUS_MASK) | (long)to.ordinal();
++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
++ return getPendingChunkStatus(curr);
++ }
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ private void changeEntityChunkStatus(final ChunkHolder.FullChunkStatus toStatus) {
++ this.world.getEntityLookup().chunkStatusChange(this.chunkX, this.chunkZ, toStatus);
++ }
++
++ private boolean processingFullStatus = false;
++
++ // only to be called on the main thread, no locks need to be held
++ public boolean handleFullStatusChange(final List changedFullStatus) {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot update full status thread off-main");
++
++ boolean ret = false;
++
++ if (this.processingFullStatus) {
++ // we cannot process updates recursively
++ return ret;
++ }
++
++ // note: use opaque reads for chunk status read since we need it to be atomic
++
++ // test if anything changed
++ final long statusCheck = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this);
++ if ((int)statusCheck == (int)(statusCheck >>> 32)) {
++ // nothing changed
++ return ret;
++ }
++
++ final ChunkTaskScheduler scheduler = this.scheduler;
++ final ChunkHolderManager holderManager = scheduler.chunkHolderManager;
++ final int ticketKeep;
++ final Long ticketId;
++ holderManager.ticketLock.lock();
++ try {
++ ticketKeep = this.currentTicketLevel;
++ ticketId = Long.valueOf(holderManager.getNextStatusUpgradeId());
++ holderManager.addTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId);
++ } finally {
++ holderManager.ticketLock.unlock();
++ }
++
++ this.processingFullStatus = true;
++ try {
++ for (;;) {
++ final long currStateEncoded = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this);
++ final ChunkHolder.FullChunkStatus currState = getCurrentChunkStatus(currStateEncoded);
++ ChunkHolder.FullChunkStatus nextState = getPendingChunkStatus(currStateEncoded);
++ if (currState == nextState) {
++ if (nextState == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
++ this.scheduler.schedulingLock.lock();
++ try {
++ this.checkUnload();
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ }
++ break;
++ }
++
++ // chunks cannot downgrade state while status is pending a change
++ final LevelChunk chunk = (LevelChunk)this.currentChunk;
++
++ // Note: we assume that only load/unload contain plugin logic
++ // plugin logic is anything stupid enough to possibly change the chunk status while it is already
++ // being changed (i.e during load it is possible it will try to set to full ticking)
++ // in order to allow this change, we also need this plugin logic to be contained strictly after all
++ // of the chunk system load callbacks are invoked
++ if (nextState.isOrAfter(currState)) {
++ // state upgrade
++ if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER);
++ holderManager.ensureInAutosave(this);
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER);
++ chunk.onChunkLoad(this);
++ this.onFullChunkLoadChange(true, changedFullStatus);
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, chunk);
++ }
++
++ if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING);
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING);
++ chunk.onChunkTicking(this);
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, chunk);
++ }
++
++ if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
++ chunk.onChunkEntityTicking(this);
++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, chunk);
++ }
++ } else {
++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING);
++ chunk.onChunkNotEntityTicking(this);
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING);
++ }
++
++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER);
++ chunk.onChunkNotTicking(this);
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER);
++ }
++
++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ this.onFullChunkLoadChange(false, changedFullStatus);
++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.INACCESSIBLE);
++ chunk.onChunkUnload(this);
++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.INACCESSIBLE);
++ }
++ }
++
++ ret = true;
++ }
++ } finally {
++ this.processingFullStatus = false;
++ holderManager.removeTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId);
++ }
++
++ return ret;
++ }
++
++ // note: must hold scheduling lock
++ // rets true if the current requested gen status is not null (effectively, whether further scheduling is not needed)
++ boolean upgradeGenTarget(final ChunkStatus toStatus) {
++ if (toStatus == null) {
++ throw new NullPointerException("toStatus cannot be null");
++ }
++ if (this.requestedGenStatus == null && this.generationTask == null) {
++ return false;
++ }
++ if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(toStatus)) {
++ this.requestedGenStatus = toStatus;
++ }
++ return true;
++ }
++
++ public void setGenerationTarget(final ChunkStatus toStatus) {
++ this.requestedGenStatus = toStatus;
++ }
++
++ public boolean hasGenerationTask() {
++ return this.generationTask != null;
++ }
++
++ public ChunkStatus getCurrentGenStatus() {
++ return this.currentGenStatus;
++ }
++
++ public ChunkStatus getRequestedGenStatus() {
++ return this.requestedGenStatus;
++ }
++
++ private final Reference2ObjectOpenHashMap>> statusWaiters = new Reference2ObjectOpenHashMap<>();
++
++ void addStatusConsumer(final ChunkStatus status, final Consumer consumer) {
++ this.statusWaiters.computeIfAbsent(status, (final ChunkStatus keyInMap) -> {
++ return new ArrayList<>(4);
++ }).add(consumer);
++ }
++
++ private void completeStatusConsumers(ChunkStatus status, final ChunkAccess chunk) {
++ // need to tell future statuses to complete if cancelled
++ do {
++ this.completeStatusConsumers0(status, chunk);
++ } while (chunk == null && status != (status = status.getNextStatus()));
++ }
++
++ private void completeStatusConsumers0(final ChunkStatus status, final ChunkAccess chunk) {
++ final List> consumers;
++ consumers = this.statusWaiters.remove(status);
++
++ if (consumers == null) {
++ return;
++ }
++
++ // must be scheduled to main, we do not trust the callback to not do anything stupid
++ this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
++ for (final Consumer consumer : consumers) {
++ try {
++ consumer.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk status callback", thr);
++ }
++ }
++ }, PrioritisedExecutor.Priority.HIGHEST);
++ }
++
++ private final Reference2ObjectOpenHashMap>> fullStatusWaiters = new Reference2ObjectOpenHashMap<>();
++
++ void addFullStatusConsumer(final ChunkHolder.FullChunkStatus status, final Consumer consumer) {
++ this.fullStatusWaiters.computeIfAbsent(status, (final ChunkHolder.FullChunkStatus keyInMap) -> {
++ return new ArrayList<>(4);
++ }).add(consumer);
++ }
++
++ private void completeFullStatusConsumers(ChunkHolder.FullChunkStatus status, final LevelChunk chunk) {
++ // need to tell future statuses to complete if cancelled
++ final ChunkHolder.FullChunkStatus max = CHUNK_STATUS_BY_ID[CHUNK_STATUS_BY_ID.length - 1];
++
++ for (;;) {
++ this.completeFullStatusConsumers0(status, chunk);
++ if (chunk != null || status == max) {
++ break;
++ }
++ status = CHUNK_STATUS_BY_ID[status.ordinal() + 1];
++ }
++ }
++
++ private void completeFullStatusConsumers0(final ChunkHolder.FullChunkStatus status, final LevelChunk chunk) {
++ final List> consumers;
++ consumers = this.fullStatusWaiters.remove(status);
++
++ if (consumers == null) {
++ return;
++ }
++
++ // must be scheduled to main, we do not trust the callback to not do anything stupid
++ this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
++ for (final Consumer consumer : consumers) {
++ try {
++ consumer.accept(chunk);
++ } catch (final ThreadDeath thr) {
++ throw thr;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to process chunk status callback", thr);
++ }
++ }
++ }, PrioritisedExecutor.Priority.HIGHEST);
++ }
++
++ // note: must hold scheduling lock
++ private void onChunkGenComplete(final ChunkAccess newChunk, final ChunkStatus newStatus,
++ final List scheduleList, final List changedLoadStatus) {
++ if (!this.neighboursBlockingGenTask.isEmpty()) {
++ throw new IllegalStateException("Cannot have neighbours blocking this gen task");
++ }
++ if (newChunk != null || (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(newStatus))) {
++ this.completeStatusConsumers(newStatus, newChunk);
++ }
++ // done now, clear state (must be done before scheduling new tasks)
++ this.generationTask = null;
++ this.generationTaskStatus = null;
++ if (newChunk == null) {
++ // task was cancelled
++ // should be careful as this could be called while holding the schedule lock and/or inside the
++ // ticket level update
++ // while a task may be cancelled, it is possible for it to be later re-scheduled
++ // however, because generationTask is only set to null on _completion_, the scheduler leaves
++ // the rescheduling logic to us here
++ final ChunkStatus requestedGenStatus = this.requestedGenStatus;
++ this.requestedGenStatus = null;
++ if (requestedGenStatus != null) {
++ // it looks like it has been requested, so we must reschedule
++ if (!this.neighboursWaitingForUs.isEmpty()) {
++ for (final Iterator> iterator = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
++ final Reference2ObjectMap.Entry entry = iterator.next();
++
++ final NewChunkHolder chunkHolder = entry.getKey();
++ final ChunkStatus toStatus = entry.getValue();
++
++ if (!requestedGenStatus.isOrAfter(toStatus)) {
++ // if we were cancelled, we are responsible for removing the waiter
++ if (!chunkHolder.neighboursBlockingGenTask.remove(this)) {
++ throw new IllegalStateException("Corrupt state");
++ }
++ if (chunkHolder.neighboursBlockingGenTask.isEmpty()) {
++ chunkHolder.checkUnload();
++ }
++ iterator.remove();
++ continue;
++ }
++ }
++ }
++
++ // note: only after generationTask -> null, generationTaskStatus -> null, and requestedGenStatus -> null
++ this.scheduler.schedule(
++ this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList
++ );
++
++ // return, can't do anything further
++ return;
++ }
++
++ if (!this.neighboursWaitingForUs.isEmpty()) {
++ for (final NewChunkHolder chunkHolder : this.neighboursWaitingForUs.keySet()) {
++ if (!chunkHolder.neighboursBlockingGenTask.remove(this)) {
++ throw new IllegalStateException("Corrupt state");
++ }
++ if (chunkHolder.neighboursBlockingGenTask.isEmpty()) {
++ chunkHolder.checkUnload();
++ }
++ }
++ this.neighboursWaitingForUs.clear();
++ }
++ // reset priority, we have nothing left to generate to
++ this.setPriority(PrioritisedExecutor.Priority.NORMAL);
++ this.checkUnload();
++ return;
++ }
++
++ this.currentChunk = newChunk;
++ this.currentGenStatus = newStatus;
++ this.lastChunkCompletion = new ChunkCompletion(newChunk, newStatus);
++
++ final ChunkStatus requestedGenStatus = this.requestedGenStatus;
++
++ List needsScheduling = null;
++ boolean recalculatePriority = false;
++ for (final Iterator> iterator
++ = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
++ final Reference2ObjectMap.Entry entry = iterator.next();
++ final NewChunkHolder neighbour = entry.getKey();
++ final ChunkStatus requiredStatus = entry.getValue();
++
++ if (!newStatus.isOrAfter(requiredStatus)) {
++ if (requestedGenStatus == null || !requestedGenStatus.isOrAfter(requiredStatus)) {
++ // if we're cancelled, still need to clear this map
++ if (!neighbour.neighboursBlockingGenTask.remove(this)) {
++ throw new IllegalStateException("Neighbour is not waiting for us?");
++ }
++ if (neighbour.neighboursBlockingGenTask.isEmpty()) {
++ neighbour.checkUnload();
++ }
++
++ iterator.remove();
++ }
++ continue;
++ }
++
++ // doesn't matter what isCancelled is here, we need to schedule if we can
++
++ recalculatePriority = true;
++ if (!neighbour.neighboursBlockingGenTask.remove(this)) {
++ throw new IllegalStateException("Neighbour is not waiting for us?");
++ }
++
++ if (neighbour.neighboursBlockingGenTask.isEmpty()) {
++ if (neighbour.requestedGenStatus != null) {
++ if (needsScheduling == null) {
++ needsScheduling = new ArrayList<>();
++ }
++ needsScheduling.add(neighbour);
++ } else {
++ neighbour.checkUnload();
++ }
++ }
++
++ // remove last; access to entry will throw if removed
++ iterator.remove();
++ }
++
++ if (newStatus == ChunkStatus.FULL) {
++ this.lockPriority();
++ // must use oldTicketLevel, we hold the schedule lock but not the ticket level lock
++ // however, schedule lock needs to be held for ticket level callback, so we're fine here
++ if (ChunkHolder.getFullChunkStatus(this.oldTicketLevel).isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
++ this.queueBorderFullStatus(true, changedLoadStatus);
++ }
++ }
++
++ if (recalculatePriority) {
++ this.recalculateNeighbourRequestedPriority();
++ }
++
++ if (requestedGenStatus != null && !newStatus.isOrAfter(requestedGenStatus)) {
++ this.scheduleNeighbours(needsScheduling, scheduleList);
++
++ // we need to schedule more tasks now
++ this.scheduler.schedule(
++ this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList
++ );
++ } else {
++ // we're done now
++ if (requestedGenStatus != null) {
++ this.requestedGenStatus = null;
++ }
++ // reached final stage, so stop scheduling now
++ this.setPriority(PrioritisedExecutor.Priority.NORMAL);
++ this.checkUnload();
++
++ this.scheduleNeighbours(needsScheduling, scheduleList);
++ }
++ }
++
++ private void scheduleNeighbours(final List needsScheduling, final List scheduleList) {
++ if (needsScheduling != null) {
++ for (int i = 0, len = needsScheduling.size(); i < len; ++i) {
++ final NewChunkHolder neighbour = needsScheduling.get(i);
++
++ this.scheduler.schedule(
++ neighbour.chunkX, neighbour.chunkZ, neighbour.requestedGenStatus, neighbour, scheduleList
++ );
++ }
++ }
++ }
++
++ public void setGenerationTask(final ChunkProgressionTask generationTask, final ChunkStatus taskStatus,
++ final List neighbours) {
++ if (this.generationTask != null || (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(taskStatus))) {
++ throw new IllegalStateException("Currently generating or provided task is trying to generate to a level we are already at!");
++ }
++ if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(taskStatus)) {
++ throw new IllegalStateException("Cannot schedule generation task when not requested");
++ }
++ this.generationTask = generationTask;
++ this.generationTaskStatus = taskStatus;
++
++ for (int i = 0, len = neighbours.size(); i < len; ++i) {
++ neighbours.get(i).addNeighbourUsingChunk();
++ }
++
++ this.checkUnload();
++
++ generationTask.onComplete((final ChunkAccess access, final Throwable thr) -> {
++ if (generationTask != this.generationTask) {
++ throw new IllegalStateException(
++ "Cannot complete generation task '" + generationTask + "' because we are waiting on '" + this.generationTask + "' instead!"
++ );
++ }
++ if (thr != null) {
++ if (this.genTaskException != null) {
++ // first one is probably the TRUE problem
++ return;
++ }
++ // don't set generation task to null, so that scheduling will not attempt to create another task and it
++ // will automatically block any further scheduling usage of this chunk as it will wait forever for a failed
++ // task to complete
++ this.genTaskException = thr;
++ this.failedGenStatus = taskStatus;
++ this.genTaskFailedThread = Thread.currentThread();
++
++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
++ "Generation task", ChunkTaskScheduler.stringIfNull(generationTask),
++ "Task to status", ChunkTaskScheduler.stringIfNull(taskStatus)
++ ), thr);
++ return;
++ }
++
++ final boolean scheduleTasks;
++ List tasks = ChunkHolderManager.getCurrentTicketUpdateScheduling();
++ if (tasks == null) {
++ scheduleTasks = true;
++ tasks = new ArrayList<>();
++ } else {
++ scheduleTasks = false;
++ // we are currently updating ticket levels, so we already hold the schedule lock
++ // this means we have to leave the ticket level update to handle the scheduling
++ }
++ final List changedLoadStatus = new ArrayList<>();
++ this.scheduler.schedulingLock.lock();
++ try {
++ for (int i = 0, len = neighbours.size(); i < len; ++i) {
++ neighbours.get(i).removeNeighbourUsingChunk();
++ }
++ this.onChunkGenComplete(access, taskStatus, tasks, changedLoadStatus);
++ } finally {
++ this.scheduler.schedulingLock.unlock();
++ }
++ this.scheduler.chunkHolderManager.addChangedStatuses(changedLoadStatus);
++
++ if (scheduleTasks) {
++ // can't hold the lock while scheduling, so we have to build the tasks and then schedule after
++ for (int i = 0, len = tasks.size(); i < len; ++i) {
++ tasks.get(i).schedule();
++ }
++ }
++ });
++ }
++
++ public PoiChunk getPoiChunk() {
++ return this.poiChunk;
++ }
++
++ public ChunkEntitySlices getEntityChunk() {
++ return this.entityChunk;
++ }
++
++ public long lastAutoSave;
++
++ public boolean save(final boolean shutdown, final boolean unloading) {
++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot save data off-main");
++
++ ChunkAccess chunk = this.getCurrentChunk();
++ PoiChunk poi = this.getPoiChunk();
++ ChunkEntitySlices entities = this.getEntityChunk();
++ boolean executedUnloadTask = false;
++
++ if (shutdown) {
++ // make sure that the async unloads complete
++ if (this.unloadState != null) {
++ // must have errored during unload
++ chunk = this.unloadState.chunk();
++ poi = this.unloadState.poiChunk();
++ entities = this.unloadState.entityChunk();
++ }
++ final UnloadTask chunkUnloadTask = this.chunkDataUnload;
++ final DelayedPrioritisedTask chunkDataUnloadTask = chunkUnloadTask == null ? null : chunkUnloadTask.task();
++ if (chunkDataUnloadTask != null) {
++ final PrioritisedExecutor.PrioritisedTask unloadTask = chunkDataUnloadTask.getTask();
++ if (unloadTask != null) {
++ executedUnloadTask = unloadTask.execute();
++ }
++ }
++ }
++
++ boolean canSaveChunk = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) &&
++ (chunk != null && ((shutdown || chunk instanceof LevelChunk) && chunk.isUnsaved()));
++ boolean canSavePOI = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) && (poi != null && poi.isDirty());
++ boolean canSaveEntities = entities != null;
++
++ try (co.aikar.timings.Timing ignored = this.world.timings.chunkSave.startTiming()) { // Paper
++ if (canSaveChunk) {
++ canSaveChunk = this.saveChunk(chunk, unloading);
++ }
++ if (canSavePOI) {
++ canSavePOI = this.savePOI(poi, unloading);
++ }
++ if (canSaveEntities) {
++ // on shutdown, we need to force transient entity chunks to save
++ canSaveEntities = this.saveEntities(entities, unloading || shutdown);
++ if (unloading || shutdown) {
++ this.lastEntityUnload = null;
++ }
++ }
++ }
++
++ return executedUnloadTask | canSaveChunk | canSaveEntities | canSavePOI;
++ }
++
++ static final class AsyncChunkSerializeTask implements Runnable {
++
++ private final ServerLevel world;
++ private final ChunkAccess chunk;
++ private final ChunkSerializer.AsyncSaveData asyncSaveData;
++ private final NewChunkHolder toComplete;
++
++ public AsyncChunkSerializeTask(final ServerLevel world, final ChunkAccess chunk, final ChunkSerializer.AsyncSaveData asyncSaveData,
++ final NewChunkHolder toComplete) {
++ this.world = world;
++ this.chunk = chunk;
++ this.asyncSaveData = asyncSaveData;
++ this.toComplete = toComplete;
++ }
++
++ @Override
++ public void run() {
++ final CompoundTag toSerialize;
++ try {
++ toSerialize = ChunkSerializer.saveChunk(this.world, this.chunk, this.asyncSaveData);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable throwable) {
++ LOGGER.error("Failed to asynchronously save chunk " + this.chunk.getPos() + " for world '" + this.world.getWorld().getName() + "', falling back to synchronous save", throwable);
++ this.world.chunkTaskScheduler.scheduleChunkTask(this.chunk.locX, this.chunk.locZ, () -> {
++ final CompoundTag synchronousSave;
++ try {
++ synchronousSave = ChunkSerializer.saveChunk(AsyncChunkSerializeTask.this.world, AsyncChunkSerializeTask.this.chunk, AsyncChunkSerializeTask.this.asyncSaveData);
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable throwable2) {
++ LOGGER.error("Failed to synchronously save chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "', chunk data will be lost", throwable2);
++ AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(null);
++ return;
++ }
++
++ AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(synchronousSave);
++ LOGGER.info("Successfully serialized chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "' synchronously");
++
++ }, PrioritisedExecutor.Priority.HIGHEST);
++ return;
++ }
++ this.toComplete.completeAsyncChunkDataSave(toSerialize);
++ }
++
++ @Override
++ public String toString() {
++ return "AsyncChunkSerializeTask{" +
++ "chunk={pos=" + this.chunk.getPos() + ",world=\"" + this.world.getWorld().getName() + "\"}" +
++ "}";
++ }
++ }
++
++ private boolean saveChunk(final ChunkAccess chunk, final boolean unloading) {
++ if (!chunk.isUnsaved()) {
++ if (unloading) {
++ this.completeAsyncChunkDataSave(null);
++ }
++ return false;
++ }
++ boolean completing = false;
++ try {
++ if (unloading) {
++ try {
++ final ChunkSerializer.AsyncSaveData asyncSaveData = ChunkSerializer.getAsyncSaveData(this.world, chunk);
++
++ final PrioritisedExecutor.PrioritisedTask task = this.scheduler.loadExecutor.createTask(new AsyncChunkSerializeTask(this.world, chunk, asyncSaveData, this));
++
++ this.chunkDataUnload.task().setTask(task);
++
++ task.queue();
++
++ return true;
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to prepare async chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', falling back to synchronous save", thr);
++ // fall through to synchronous save
++ }
++ }
++
++ final CompoundTag save = ChunkSerializer.saveChunk(this.world, chunk, null);
++
++ if (unloading) {
++ completing = true;
++ this.completeAsyncChunkDataSave(save);
++ LOGGER.info("Successfully serialized chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "' synchronously");
++ } else {
++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.CHUNK_DATA);
++ }
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
++ if (unloading && !completing) {
++ this.completeAsyncChunkDataSave(null);
++ }
++ }
++
++ return true;
++ }
++
++ private boolean lastEntitySaveNull;
++ private CompoundTag lastEntityUnload;
++ private boolean saveEntities(final ChunkEntitySlices entities, final boolean unloading) {
++ try {
++ CompoundTag mergeFrom = null;
++ if (entities.isTransient()) {
++ if (!unloading) {
++ // if we're a transient chunk, we cannot save until unloading because otherwise a double save will
++ // result in double adding the entities
++ return false;
++ }
++ try {
++ mergeFrom = RegionFileIOThread.loadData(this.world, this.chunkX, this.chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, PrioritisedExecutor.Priority.BLOCKING);
++ } catch (final Exception ex) {
++ LOGGER.error("Cannot merge transient entities for chunk (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', data on disk will be replaced", ex);
++ }
++ }
++
++ final CompoundTag save = entities.save();
++ if (mergeFrom != null) {
++ if (save == null) {
++ // don't override the data on disk with nothing
++ return false;
++ } else {
++ EntityStorage.copyEntities(mergeFrom, save);
++ }
++ }
++ if (save == null && this.lastEntitySaveNull) {
++ return false;
++ }
++
++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.ENTITY_DATA);
++ this.lastEntitySaveNull = save == null;
++ if (unloading) {
++ this.lastEntityUnload = save;
++ }
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save entity data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
++ }
++
++ return true;
++ }
++
++ private boolean lastPoiSaveNull;
++ private boolean savePOI(final PoiChunk poi, final boolean unloading) {
++ try {
++ final CompoundTag save = poi.save();
++ poi.setDirty(false);
++ if (save == null && this.lastPoiSaveNull) {
++ if (unloading) {
++ this.poiDataUnload.completable().complete(null);
++ }
++ return false;
++ }
++
++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.POI_DATA);
++ this.lastPoiSaveNull = save == null;
++ if (unloading) {
++ this.poiDataUnload.completable().complete(save);
++ }
++ } catch (final ThreadDeath death) {
++ throw death;
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save poi data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
++ }
++
++ return true;
++ }
++
++ @Override
++ public String toString() {
++ final ChunkCompletion lastCompletion = this.lastChunkCompletion;
++ final ChunkEntitySlices entityChunk = this.entityChunk;
++ final long chunkStatus = this.chunkStatus;
++ final int fullChunkStatus = (int)chunkStatus;
++ final int pendingChunkStatus = (int)(chunkStatus >>> 32);
++ final ChunkHolder.FullChunkStatus currentFullStatus = fullChunkStatus < 0 || fullChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[fullChunkStatus];
++ final ChunkHolder.FullChunkStatus pendingFullStatus = pendingChunkStatus < 0 || pendingChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[pendingChunkStatus];
++ return "NewChunkHolder{" +
++ "world=" + this.world.getWorld().getName() +
++ ", chunkX=" + this.chunkX +
++ ", chunkZ=" + this.chunkZ +
++ ", entityChunkFromDisk=" + (entityChunk != null && !entityChunk.isTransient()) +
++ ", lastChunkCompletion={chunk_class=" + (lastCompletion == null || lastCompletion.chunk() == null ? "null" : lastCompletion.chunk().getClass().getName()) + ",status=" + (lastCompletion == null ? "null" : lastCompletion.genStatus()) + "}" +
++ ", currentGenStatus=" + this.currentGenStatus +
++ ", requestedGenStatus=" + this.requestedGenStatus +
++ ", generationTask=" + this.generationTask +
++ ", generationTaskStatus=" + this.generationTaskStatus +
++ ", priority=" + this.priority +
++ ", priorityLocked=" + this.priorityLocked +
++ ", neighbourRequestedPriority=" + this.neighbourRequestedPriority +
++ ", effective_priority=" + this.getEffectivePriority() +
++ ", oldTicketLevel=" + this.oldTicketLevel +
++ ", currentTicketLevel=" + this.currentTicketLevel +
++ ", totalNeighboursUsingThisChunk=" + this.totalNeighboursUsingThisChunk +
++ ", fullNeighbourChunksLoadedBitset=" + this.fullNeighbourChunksLoadedBitset +
++ ", chunkStatusRaw=" + chunkStatus +
++ ", currentChunkStatus=" + currentFullStatus +
++ ", pendingChunkStatus=" + pendingFullStatus +
++ ", is_unload_safe=" + this.isSafeToUnload() +
++ ", killed=" + this.killed +
++ '}';
++ }
++
++ private static JsonElement serializeCompletable(final Completable<?> completable) {
++ if (completable == null) {
++ return new JsonPrimitive("null");
++ }
++
++ final JsonObject ret = new JsonObject();
++ final boolean isCompleted = completable.isCompleted();
++ ret.addProperty("completed", Boolean.valueOf(isCompleted));
++
++ if (isCompleted) {
++ ret.addProperty("completed_exceptionally", Boolean.valueOf(completable.getThrowable() != null));
++ }
++
++ return ret;
++ }
++
++ // holds ticket and scheduling lock
++ public JsonObject getDebugJson() {
++ final JsonObject ret = new JsonObject();
++
++ final ChunkCompletion lastCompletion = this.lastChunkCompletion;
++ final ChunkEntitySlices slices = this.entityChunk;
++ final PoiChunk poiChunk = this.poiChunk;
++
++ ret.addProperty("chunkX", Integer.valueOf(this.chunkX));
++ ret.addProperty("chunkZ", Integer.valueOf(this.chunkZ));
++ ret.addProperty("entity_chunk", slices == null ? "null" : "transient=" + slices.isTransient());
++ ret.addProperty("poi_chunk", "null=" + (poiChunk == null));
++ ret.addProperty("completed_chunk_class", lastCompletion == null ? "null" : lastCompletion.chunk().getClass().getName());
++ ret.addProperty("completed_gen_status", lastCompletion == null ? "null" : lastCompletion.genStatus().toString());
++ ret.addProperty("priority", Objects.toString(this.priority));
++ ret.addProperty("neighbour_requested_priority", Objects.toString(this.neighbourRequestedPriority));
++ ret.addProperty("generation_task", Objects.toString(this.generationTask));
++ ret.addProperty("is_safe_unload", Objects.toString(this.isSafeToUnload()));
++ ret.addProperty("old_ticket_level", Integer.valueOf(this.oldTicketLevel));
++ ret.addProperty("current_ticket_level", Integer.valueOf(this.currentTicketLevel));
++ ret.addProperty("neighbours_using_chunk", Integer.valueOf(this.totalNeighboursUsingThisChunk));
++
++ final JsonObject neighbourWaitState = new JsonObject();
++ ret.add("neighbour_state", neighbourWaitState);
++
++ final JsonArray blockingGenNeighbours = new JsonArray();
++ neighbourWaitState.add("blocking_gen_task", blockingGenNeighbours);
++ for (final NewChunkHolder blockingGenNeighbour : this.neighboursBlockingGenTask) {
++ final JsonObject neighbour = new JsonObject();
++ blockingGenNeighbours.add(neighbour);
++
++ neighbour.addProperty("chunkX", Integer.valueOf(blockingGenNeighbour.chunkX));
++ neighbour.addProperty("chunkZ", Integer.valueOf(blockingGenNeighbour.chunkZ));
++ }
++
++ final JsonArray neighboursWaitingForUs = new JsonArray();
++ neighbourWaitState.add("neighbours_waiting_on_us", neighboursWaitingForUs);
++ for (final Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus> entry : this.neighboursWaitingForUs.reference2ObjectEntrySet()) {
++ final NewChunkHolder holder = entry.getKey();
++ final ChunkStatus status = entry.getValue();
++
++ final JsonObject neighbour = new JsonObject();
++ neighboursWaitingForUs.add(neighbour);
++
++
++ neighbour.addProperty("chunkX", Integer.valueOf(holder.chunkX));
++ neighbour.addProperty("chunkZ", Integer.valueOf(holder.chunkZ));
++ neighbour.addProperty("waiting_for", Objects.toString(status));
++ }
++
++ ret.addProperty("fullchunkstatus", Objects.toString(this.getChunkStatus()));
++ ret.addProperty("fullchunkstatus_raw", Long.valueOf(this.chunkStatus));
++ ret.addProperty("generation_task", Objects.toString(this.generationTask));
++ ret.addProperty("requested_generation", Objects.toString(this.requestedGenStatus));
++ ret.addProperty("has_entity_load_task", Boolean.valueOf(this.entityDataLoadTask != null));
++ ret.addProperty("has_poi_load_task", Boolean.valueOf(this.poiDataLoadTask != null));
++
++ final UnloadTask entityDataUnload = this.entityDataUnload;
++ final UnloadTask poiDataUnload = this.poiDataUnload;
++ final UnloadTask chunkDataUnload = this.chunkDataUnload;
++
++ ret.add("entity_unload_completable", serializeCompletable(entityDataUnload == null ? null : entityDataUnload.completable()));
++ ret.add("poi_unload_completable", serializeCompletable(poiDataUnload == null ? null : poiDataUnload.completable()));
++ ret.add("chunk_unload_completable", serializeCompletable(chunkDataUnload == null ? null : chunkDataUnload.completable()));
++
++ final DelayedPrioritisedTask unloadTask = chunkDataUnload == null ? null : chunkDataUnload.task();
++ if (unloadTask == null) {
++ ret.addProperty("unload_task_priority", "null");
++ ret.addProperty("unload_task_priority_raw", "null");
++ } else {
++ ret.addProperty("unload_task_priority", Objects.toString(unloadTask.getPriority()));
++ ret.addProperty("unload_task_priority_raw", Integer.valueOf(unloadTask.getPriorityInternal()));
++ }
++
++ ret.addProperty("killed", Boolean.valueOf(this.killed));
++
++ return ret;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..b4c56bf12dc8dd17452210ece4fd67411cc6b2fd
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java
+@@ -0,0 +1,215 @@
++package io.papermc.paper.chunk.system.scheduling;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import java.lang.invoke.VarHandle;
++
++public abstract class PriorityHolder {
++
++ protected volatile int priority;
++ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(PriorityHolder.class, "priority", int.class);
++
++ protected static final int PRIORITY_SCHEDULED = Integer.MIN_VALUE >>> 0;
++ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 1;
++
++ protected final int getPriorityVolatile() {
++ return (int)PRIORITY_HANDLE.getVolatile((PriorityHolder)this);
++ }
++
++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
++ return (int)PRIORITY_HANDLE.compareAndExchange((PriorityHolder)this, (int)expect, (int)update);
++ }
++
++ protected final int getAndOrPriorityVolatile(final int val) {
++ return (int)PRIORITY_HANDLE.getAndBitwiseOr((PriorityHolder)this, (int)val);
++ }
++
++ protected final void setPriorityPlain(final int val) {
++ PRIORITY_HANDLE.set((PriorityHolder)this, (int)val);
++ }
++
++ protected PriorityHolder(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++ this.setPriorityPlain(priority.priority);
++ }
++
++ // used only for debug json
++ public boolean isScheduled() {
++ return (this.getPriorityVolatile() & PRIORITY_SCHEDULED) != 0;
++ }
++
++ // returns false if cancelled
++ protected boolean markExecuting() {
++ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0;
++ }
++
++ protected boolean isMarkedExecuted() {
++ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0;
++ }
++
++ public void cancel() {
++ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) {
++ // cancelled already
++ return;
++ }
++ this.cancelScheduled();
++ }
++
++ public void schedule() {
++ int priority = this.getPriorityVolatile();
++
++ if ((priority & PRIORITY_SCHEDULED) != 0) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled
++ return;
++ }
++
++ this.scheduleTask(PrioritisedExecutor.Priority.getPriority(priority));
++
++ int failures = 0;
++ for (;;) {
++ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | PRIORITY_SCHEDULED))) {
++ return;
++ }
++
++ if ((priority & PRIORITY_SCHEDULED) != 0) {
++ throw new IllegalStateException("schedule() called twice");
++ }
++
++ if ((priority & PRIORITY_EXECUTED) != 0) {
++ // cancelled or executed
++ return;
++ }
++
++ this.setPriorityScheduled(PrioritisedExecutor.Priority.getPriority(priority));
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public final PrioritisedExecutor.Priority getPriority() {
++ final int ret = this.getPriorityVolatile();
++ if ((ret & PRIORITY_EXECUTED) != 0) {
++ return PrioritisedExecutor.Priority.COMPLETING;
++ }
++ if ((ret & PRIORITY_SCHEDULED) != 0) {
++ return this.getScheduledPriority();
++ }
++ return PrioritisedExecutor.Priority.getPriority(ret);
++ }
++
++ public final void lowerPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ return;
++ }
++
++ if ((curr & PRIORITY_SCHEDULED) != 0) {
++ this.lowerPriorityScheduled(priority);
++ return;
++ }
++
++ if (!priority.isLowerPriority(curr)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public final void setPriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ return;
++ }
++
++ if ((curr & PRIORITY_SCHEDULED) != 0) {
++ this.setPriorityScheduled(priority);
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ public final void raisePriority(final PrioritisedExecutor.Priority priority) {
++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
++ throw new IllegalArgumentException("Invalid priority " + priority);
++ }
++
++ int failures = 0;
++ for (int curr = this.getPriorityVolatile();;) {
++ if ((curr & PRIORITY_EXECUTED) != 0) {
++ return;
++ }
++
++ if ((curr & PRIORITY_SCHEDULED) != 0) {
++ this.raisePriorityScheduled(priority);
++ return;
++ }
++
++ if (!priority.isHigherPriority(curr)) {
++ return;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
++ return;
++ }
++
++ // failed, retry
++
++ ++failures;
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++ }
++ }
++
++ protected abstract void cancelScheduled();
++
++ protected abstract PrioritisedExecutor.Priority getScheduledPriority();
++
++ protected abstract void scheduleTask(final PrioritisedExecutor.Priority priority);
++
++ protected abstract void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority);
++
++ protected abstract void setPriorityScheduled(final PrioritisedExecutor.Priority priority);
++
++ protected abstract void raisePriorityScheduled(final PrioritisedExecutor.Priority priority);
++}
+diff --git a/src/main/java/io/papermc/paper/command/PaperCommand.java b/src/main/java/io/papermc/paper/command/PaperCommand.java
+index c9a2ac696f7cefc8b0715f53db3fc541f26b62f6..1e9105cf5ab2ff0ee847fafd00b41e1bd47f1d9e 100644
+--- a/src/main/java/io/papermc/paper/command/PaperCommand.java
++++ b/src/main/java/io/papermc/paper/command/PaperCommand.java
+@@ -1,5 +1,6 @@
+ package io.papermc.paper.command;
+
++import io.papermc.paper.command.subcommands.ChunkDebugCommand;
+ import io.papermc.paper.command.subcommands.EntityCommand;
+ import io.papermc.paper.command.subcommands.FixLightCommand;
+ import io.papermc.paper.command.subcommands.HeapDumpCommand;
+@@ -42,6 +43,7 @@ public final class PaperCommand extends Command {
+ commands.put(Set.of("reload"), new ReloadCommand());
+ commands.put(Set.of("version"), new VersionCommand());
+ commands.put(Set.of("fixlight"), new FixLightCommand());
++ commands.put(Set.of("debug", "chunkinfo", "holderinfo"), new ChunkDebugCommand());
+
+ return commands.entrySet().stream()
+ .flatMap(entry -> entry.getKey().stream().map(s -> Map.entry(s, entry.getValue())))
+diff --git a/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..628c549b1436c3de75071ecd6182a9beadd4840b
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java
+@@ -0,0 +1,264 @@
++package io.papermc.paper.command.subcommands;
++
++import io.papermc.paper.command.CommandUtil;
++import io.papermc.paper.command.PaperSubcommand;
++import java.io.File;
++import java.time.LocalDateTime;
++import java.time.format.DateTimeFormatter;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.Locale;
++import net.minecraft.server.MCUtil;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ImposterProtoChunk;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.chunk.ProtoChunk;
++import org.bukkit.Bukkit;
++import org.bukkit.command.CommandSender;
++import org.bukkit.craftbukkit.CraftWorld;
++import org.checkerframework.checker.nullness.qual.NonNull;
++import org.checkerframework.checker.nullness.qual.Nullable;
++import org.checkerframework.framework.qual.DefaultQualifier;
++
++import static net.kyori.adventure.text.Component.text;
++import static net.kyori.adventure.text.format.NamedTextColor.BLUE;
++import static net.kyori.adventure.text.format.NamedTextColor.DARK_AQUA;
++import static net.kyori.adventure.text.format.NamedTextColor.GREEN;
++import static net.kyori.adventure.text.format.NamedTextColor.RED;
++
++@DefaultQualifier(NonNull.class)
++public final class ChunkDebugCommand implements PaperSubcommand {
++ @Override
++ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
++ switch (subCommand) {
++ case "debug" -> this.doDebug(sender, args);
++ case "chunkinfo" -> this.doChunkInfo(sender, args);
++ case "holderinfo" -> this.doHolderInfo(sender, args);
++ }
++ return true;
++ }
++
++ @Override
++ public List<String> tabComplete(final CommandSender sender, final String subCommand, final String[] args) {
++ switch (subCommand) {
++ case "debug" -> {
++ if (args.length == 1) {
++ return CommandUtil.getListMatchingLast(sender, args, "help", "chunks");
++ }
++ }
++ case "holderinfo" -> {
++ List<String> worldNames = new ArrayList<>();
++ worldNames.add("*");
++ for (org.bukkit.World world : Bukkit.getWorlds()) {
++ worldNames.add(world.getName());
++ }
++ if (args.length == 1) {
++ return CommandUtil.getListMatchingLast(sender, args, worldNames);
++ }
++ }
++ case "chunkinfo" -> {
++ List<String> worldNames = new ArrayList<>();
++ worldNames.add("*");
++ for (org.bukkit.World world : Bukkit.getWorlds()) {
++ worldNames.add(world.getName());
++ }
++ if (args.length == 1) {
++ return CommandUtil.getListMatchingLast(sender, args, worldNames);
++ }
++ }
++ }
++ return Collections.emptyList();
++ }
++
++ private void doChunkInfo(final CommandSender sender, final String[] args) {
++ List<org.bukkit.World> worlds;
++ if (args.length < 1 || args[0].equals("*")) {
++ worlds = Bukkit.getWorlds();
++ } else {
++ worlds = new ArrayList<>(args.length);
++ for (final String arg : args) {
++ org.bukkit.@Nullable World world = Bukkit.getWorld(arg);
++ if (world == null) {
++ sender.sendMessage(text("World '" + arg + "' is invalid", RED));
++ return;
++ }
++ worlds.add(world);
++ }
++ }
++
++ int accumulatedTotal = 0;
++ int accumulatedInactive = 0;
++ int accumulatedBorder = 0;
++ int accumulatedTicking = 0;
++ int accumulatedEntityTicking = 0;
++
++ for (final org.bukkit.World bukkitWorld : worlds) {
++ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle();
++
++ int total = 0;
++ int inactive = 0;
++ int border = 0;
++ int ticking = 0;
++ int entityTicking = 0;
++
++ for (final ChunkHolder chunk : net.minecraft.server.ChunkSystem.getVisibleChunkHolders(world)) {
++ if (chunk.getFullChunkNowUnchecked() == null) {
++ continue;
++ }
++
++ ++total;
++
++ ChunkHolder.FullChunkStatus state = chunk.getFullStatus();
++
++ switch (state) {
++ case INACCESSIBLE -> ++inactive;
++ case BORDER -> ++border;
++ case TICKING -> ++ticking;
++ case ENTITY_TICKING -> ++entityTicking;
++ }
++ }
++
++ accumulatedTotal += total;
++ accumulatedInactive += inactive;
++ accumulatedBorder += border;
++ accumulatedTicking += ticking;
++ accumulatedEntityTicking += entityTicking;
++
++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":")));
++ sender.sendMessage(text().color(DARK_AQUA).append(
++ text("Total: ", BLUE), text(total),
++ text(" Inactive: ", BLUE), text(inactive),
++ text(" Border: ", BLUE), text(border),
++ text(" Ticking: ", BLUE), text(ticking),
++ text(" Entity: ", BLUE), text(entityTicking)
++ ));
++ }
++ if (worlds.size() > 1) {
++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA)));
++ sender.sendMessage(text().color(DARK_AQUA).append(
++ text("Total: ", BLUE), text(accumulatedTotal),
++ text(" Inactive: ", BLUE), text(accumulatedInactive),
++ text(" Border: ", BLUE), text(accumulatedBorder),
++ text(" Ticking: ", BLUE), text(accumulatedTicking),
++ text(" Entity: ", BLUE), text(accumulatedEntityTicking)
++ ));
++ }
++ }
++
++ private void doHolderInfo(final CommandSender sender, final String[] args) {
++ List<org.bukkit.World> worlds;
++ if (args.length < 1 || args[0].equals("*")) {
++ worlds = Bukkit.getWorlds();
++ } else {
++ worlds = new ArrayList<>(args.length);
++ for (final String arg : args) {
++ org.bukkit.@Nullable World world = Bukkit.getWorld(arg);
++ if (world == null) {
++ sender.sendMessage(text("World '" + arg + "' is invalid", RED));
++ return;
++ }
++ worlds.add(world);
++ }
++ }
++
++ int accumulatedTotal = 0;
++ int accumulatedCanUnload = 0;
++ int accumulatedNull = 0;
++ int accumulatedReadOnly = 0;
++ int accumulatedProtoChunk = 0;
++ int accumulatedFullChunk = 0;
++
++ for (final org.bukkit.World bukkitWorld : worlds) {
++ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle();
++
++ int total = 0;
++ int canUnload = 0;
++ int nullChunks = 0;
++ int readOnly = 0;
++ int protoChunk = 0;
++ int fullChunk = 0;
++
++ for (final ChunkHolder chunk : world.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders()) { // Paper - change updating chunks map
++ final ChunkAccess lastChunk = chunk.getAvailableChunkNow();
++
++ ++total;
++
++ if (lastChunk == null) {
++ ++nullChunks;
++ } else if (lastChunk instanceof ImposterProtoChunk) {
++ ++readOnly;
++ } else if (lastChunk instanceof ProtoChunk) {
++ ++protoChunk;
++ } else if (lastChunk instanceof LevelChunk) {
++ ++fullChunk;
++ }
++
++ if (chunk.newChunkHolder.isSafeToUnload() == null) {
++ ++canUnload;
++ }
++ }
++
++ accumulatedTotal += total;
++ accumulatedCanUnload += canUnload;
++ accumulatedNull += nullChunks;
++ accumulatedReadOnly += readOnly;
++ accumulatedProtoChunk += protoChunk;
++ accumulatedFullChunk += fullChunk;
++
++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":")));
++ sender.sendMessage(text().color(DARK_AQUA).append(
++ text("Total: ", BLUE), text(total),
++ text(" Unloadable: ", BLUE), text(canUnload),
++ text(" Null: ", BLUE), text(nullChunks),
++ text(" ReadOnly: ", BLUE), text(readOnly),
++ text(" Proto: ", BLUE), text(protoChunk),
++ text(" Full: ", BLUE), text(fullChunk)
++ ));
++ }
++ if (worlds.size() > 1) {
++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA)));
++ sender.sendMessage(text().color(DARK_AQUA).append(
++ text("Total: ", BLUE), text(accumulatedTotal),
++ text(" Unloadable: ", BLUE), text(accumulatedCanUnload),
++ text(" Null: ", BLUE), text(accumulatedNull),
++ text(" ReadOnly: ", BLUE), text(accumulatedReadOnly),
++ text(" Proto: ", BLUE), text(accumulatedProtoChunk),
++ text(" Full: ", BLUE), text(accumulatedFullChunk)
++ ));
++ }
++ }
++
++ private void doDebug(final CommandSender sender, final String[] args) {
++ if (args.length < 1) {
++ sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED));
++ return;
++ }
++
++ final String debugType = args[0].toLowerCase(Locale.ENGLISH);
++ switch (debugType) {
++ case "chunks" -> {
++ if (args.length >= 2 && args[1].toLowerCase(Locale.ENGLISH).equals("help")) {
++ sender.sendMessage(text("Use /paper debug chunks [world] to dump loaded chunk information to a file", RED));
++ break;
++ }
++ File file = new File(new File(new File("."), "debug"),
++ "chunks-" + DateTimeFormatter.ofPattern("yyyy-MM-dd_HH.mm.ss").format(LocalDateTime.now()) + ".txt");
++ sender.sendMessage(text("Writing chunk information dump to " + file, GREEN));
++ try {
++ MCUtil.dumpChunks(file, false);
++ sender.sendMessage(text("Successfully written chunk information!", GREEN));
++ } catch (Throwable thr) {
++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
++ sender.sendMessage(text("Failed to dump chunk information, see console", RED));
++ }
++ }
++ // "help" & default
++ default -> sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED));
++ }
++ }
++
++}
+diff --git a/src/main/java/io/papermc/paper/util/TickThread.java b/src/main/java/io/papermc/paper/util/TickThread.java
+index d59885ee9c8b29d5bac34dce0597e345e5358c77..fc57850b80303fcade89ca95794f63910404a407 100644
+--- a/src/main/java/io/papermc/paper/util/TickThread.java
++++ b/src/main/java/io/papermc/paper/util/TickThread.java
+@@ -6,7 +6,7 @@ import net.minecraft.world.entity.Entity;
+ import org.bukkit.Bukkit;
+ import java.util.concurrent.atomic.AtomicInteger;
+
+-public final class TickThread extends Thread {
++public class TickThread extends Thread {
+
+ public static final boolean STRICT_THREAD_CHECKS = Boolean.getBoolean("paper.strict-thread-checks");
+
+@@ -16,6 +16,10 @@ public final class TickThread extends Thread {
+ }
+ }
+
++ /**
++ * @deprecated
++ */
++ @Deprecated
+ public static void softEnsureTickThread(final String reason) {
+ if (!STRICT_THREAD_CHECKS) {
+ return;
+@@ -23,6 +27,10 @@ public final class TickThread extends Thread {
+ ensureTickThread(reason);
+ }
+
++ /**
++ * @deprecated
++ */
++ @Deprecated
+ public static void ensureTickThread(final String reason) {
+ if (!isTickThread()) {
+ MinecraftServer.LOGGER.error("Thread " + Thread.currentThread().getName() + " failed main thread check: " + reason, new Throwable());
+@@ -66,14 +74,14 @@ public final class TickThread extends Thread {
+ }
+
+ public static boolean isTickThread() {
+- return Bukkit.isPrimaryThread();
++ return Thread.currentThread() instanceof TickThread;
+ }
+
+ public static boolean isTickThreadFor(final ServerLevel world, final int chunkX, final int chunkZ) {
+- return Bukkit.isPrimaryThread();
++ return Thread.currentThread() instanceof TickThread;
+ }
+
+ public static boolean isTickThreadFor(final Entity entity) {
+- return Bukkit.isPrimaryThread();
++ return Thread.currentThread() instanceof TickThread;
+ }
+ }
+diff --git a/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..f597d65d56964297eeeed6c7e77703764178fee0
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java
+@@ -0,0 +1,601 @@
++package io.papermc.paper.world;
++
++import com.destroystokyo.paper.util.maplist.EntityList;
++import io.papermc.paper.chunk.system.entity.EntityLookup;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectMap;
++import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap;
++import net.minecraft.nbt.CompoundTag;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.util.Mth;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.entity.EntityType;
++import net.minecraft.world.entity.boss.EnderDragonPart;
++import net.minecraft.world.entity.boss.enderdragon.EnderDragon;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.chunk.storage.EntityStorage;
++import net.minecraft.world.level.entity.Visibility;
++import net.minecraft.world.phys.AABB;
++import org.bukkit.craftbukkit.event.CraftEventFactory;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.List;
++import java.util.function.Predicate;
++
++public final class ChunkEntitySlices {
++
++ protected final int minSection;
++ protected final int maxSection;
++ public final int chunkX;
++ public final int chunkZ;
++ protected final ServerLevel world;
++
++ protected final EntityCollectionBySection allEntities;
++ protected final EntityCollectionBySection hardCollidingEntities;
++ protected final Reference2ObjectOpenHashMap<Class<? extends Entity>, EntityCollectionBySection> entitiesByClass;
++ protected final EntityList entities = new EntityList();
++
++ public ChunkHolder.FullChunkStatus status;
++
++ protected boolean isTransient;
++
++ public boolean isTransient() {
++ return this.isTransient;
++ }
++
++ public void setTransient(final boolean value) {
++ this.isTransient = value;
++ }
++
++ // TODO implement container search optimisations
++
++ public ChunkEntitySlices(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus status,
++ final int minSection, final int maxSection) { // inclusive, inclusive
++ this.minSection = minSection;
++ this.maxSection = maxSection;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.world = world;
++
++ this.allEntities = new EntityCollectionBySection(this);
++ this.hardCollidingEntities = new EntityCollectionBySection(this);
++ this.entitiesByClass = new Reference2ObjectOpenHashMap<>();
++
++ this.status = status;
++ }
++
++ // Paper start - optimise CraftChunk#getEntities
++ public org.bukkit.entity.Entity[] getChunkEntities() {
++ List<org.bukkit.entity.Entity> ret = new java.util.ArrayList<>();
++ final Entity[] entities = this.entities.getRawData();
++ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) {
++ final Entity entity = entities[i];
++ if (entity == null) {
++ continue;
++ }
++ final org.bukkit.entity.Entity bukkit = entity.getBukkitEntity();
++ if (bukkit != null && bukkit.isValid()) {
++ ret.add(bukkit);
++ }
++ }
++
++ return ret.toArray(new org.bukkit.entity.Entity[0]);
++ }
++
++ public CompoundTag save() {
++ final int len = this.entities.size();
++ if (len == 0) {
++ return null;
++ }
++
++ final Entity[] rawData = this.entities.getRawData();
++ final List collectedEntities = new ArrayList<>(len);
++ for (int i = 0; i < len; ++i) {
++ final Entity entity = rawData[i];
++ if (entity.shouldBeSaved()) {
++ collectedEntities.add(entity);
++ }
++ }
++
++ if (collectedEntities.isEmpty()) {
++ return null;
++ }
++
++ return EntityStorage.saveEntityChunk(collectedEntities, new ChunkPos(this.chunkX, this.chunkZ), this.world);
++ }
++
++ // returns true if this chunk has transient entities remaining
++ public boolean unload() {
++ final int len = this.entities.size();
++ final Entity[] collectedEntities = Arrays.copyOf(this.entities.getRawData(), len);
++
++ for (int i = 0; i < len; ++i) {
++ final Entity entity = collectedEntities[i];
++ if (entity.isRemoved()) {
++ // removed by us below
++ continue;
++ }
++ if (entity.shouldBeSaved()) {
++ entity.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK);
++ if (entity.isVehicle()) {
++ // we cannot assume that these entities are contained within this chunk, because entities can
++ // desync - so we need to remove them all
++ for (final Entity passenger : entity.getIndirectPassengers()) {
++ passenger.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK);
++ }
++ }
++ }
++ }
++
++ return this.entities.size() != 0;
++ }
++
++ private List getAllEntities() {
++ final int len = this.entities.size();
++ if (len == 0) {
++ return new ArrayList<>();
++ }
++
++ final Entity[] rawData = this.entities.getRawData();
++ final List collectedEntities = new ArrayList<>(len);
++ for (int i = 0; i < len; ++i) {
++ collectedEntities.add(rawData[i]);
++ }
++
++ return collectedEntities;
++ }
++
++ public void callEntitiesLoadEvent() {
++ CraftEventFactory.callEntitiesLoadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities());
++ }
++
++ public void callEntitiesUnloadEvent() {
++ CraftEventFactory.callEntitiesUnloadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities());
++ }
++ // Paper end - optimise CraftChunk#getEntities
++
++ public boolean isEmpty() {
++ return this.entities.size() == 0;
++ }
++
++ public void mergeInto(final ChunkEntitySlices slices) {
++ final Entity[] entities = this.entities.getRawData();
++ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) {
++ final Entity entity = entities[i];
++ slices.addEntity(entity, entity.sectionY);
++ }
++ }
++
++ private boolean preventStatusUpdates;
++ public boolean startPreventingStatusUpdates() {
++ final boolean ret = this.preventStatusUpdates;
++ this.preventStatusUpdates = true;
++ return ret;
++ }
++
++ public void stopPreventingStatusUpdates(final boolean prev) {
++ this.preventStatusUpdates = prev;
++ }
++
++ public void updateStatus(final ChunkHolder.FullChunkStatus status, final EntityLookup lookup) {
++ this.status = status;
++
++ final Entity[] entities = this.entities.getRawData();
++
++ for (int i = 0, size = this.entities.size(); i < size; ++i) {
++ final Entity entity = entities[i];
++
++ final Visibility oldVisibility = EntityLookup.getEntityStatus(entity);
++ entity.chunkStatus = status;
++ final Visibility newVisibility = EntityLookup.getEntityStatus(entity);
++
++ lookup.entityStatusChange(entity, this, oldVisibility, newVisibility, false, false, false);
++ }
++ }
++
++ public boolean addEntity(final Entity entity, final int chunkSection) {
++ if (!this.entities.add(entity)) {
++ return false;
++ }
++ entity.chunkStatus = this.status;
++ final int sectionIndex = chunkSection - this.minSection;
++
++ this.allEntities.addEntity(entity, sectionIndex);
++
++ if (entity.hardCollides()) {
++ this.hardCollidingEntities.addEntity(entity, sectionIndex);
++ }
++
++ for (final Iterator, EntityCollectionBySection>> iterator =
++ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
++ final Reference2ObjectMap.Entry, EntityCollectionBySection> entry = iterator.next();
++
++ if (entry.getKey().isInstance(entity)) {
++ entry.getValue().addEntity(entity, sectionIndex);
++ }
++ }
++
++ return true;
++ }
++
++ public boolean removeEntity(final Entity entity, final int chunkSection) {
++ if (!this.entities.remove(entity)) {
++ return false;
++ }
++ entity.chunkStatus = null;
++ final int sectionIndex = chunkSection - this.minSection;
++
++ this.allEntities.removeEntity(entity, sectionIndex);
++
++ if (entity.hardCollides()) {
++ this.hardCollidingEntities.removeEntity(entity, sectionIndex);
++ }
++
++ for (final Iterator, EntityCollectionBySection>> iterator =
++ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
++ final Reference2ObjectMap.Entry, EntityCollectionBySection> entry = iterator.next();
++
++ if (entry.getKey().isInstance(entity)) {
++ entry.getValue().removeEntity(entity, sectionIndex);
++ }
++ }
++
++ return true;
++ }
++
++ public void getHardCollidingEntities(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ this.hardCollidingEntities.getEntities(except, box, into, predicate);
++ }
++
++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ this.allEntities.getEntitiesWithEnderDragonParts(except, box, into, predicate);
++ }
++
++ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ this.allEntities.getEntities(except, box, into, predicate);
++ }
++
++ public void getEntities(final EntityType> type, final AABB box, final List super T> into,
++ final Predicate super T> predicate) {
++ this.allEntities.getEntities(type, box, (List)into, (Predicate)predicate);
++ }
++
++ protected EntityCollectionBySection initClass(final Class extends Entity> clazz) {
++ final EntityCollectionBySection ret = new EntityCollectionBySection(this);
++
++ for (int sectionIndex = 0; sectionIndex < this.allEntities.entitiesBySection.length; ++sectionIndex) {
++ final BasicEntityList sectionEntities = this.allEntities.entitiesBySection[sectionIndex];
++ if (sectionEntities == null) {
++ continue;
++ }
++
++ final Entity[] storage = sectionEntities.storage;
++
++ for (int i = 0, len = Math.min(storage.length, sectionEntities.size()); i < len; ++i) {
++ final Entity entity = storage[i];
++
++ if (clazz.isInstance(entity)) {
++ ret.addEntity(entity, sectionIndex);
++ }
++ }
++ }
++
++ return ret;
++ }
++
++ public void getEntities(final Class extends T> clazz, final Entity except, final AABB box, final List super T> into,
++ final Predicate super T> predicate) {
++ EntityCollectionBySection collection = this.entitiesByClass.get(clazz);
++ if (collection != null) {
++ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate);
++ } else {
++ this.entitiesByClass.putIfAbsent(clazz, collection = this.initClass(clazz));
++ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate);
++ }
++ }
++
++ protected static final class BasicEntityList {
++
++ protected static final Entity[] EMPTY = new Entity[0];
++ protected static final int DEFAULT_CAPACITY = 4;
++
++ protected E[] storage;
++ protected int size;
++
++ public BasicEntityList() {
++ this(0);
++ }
++
++ public BasicEntityList(final int cap) {
++ this.storage = (E[])(cap <= 0 ? EMPTY : new Entity[cap]);
++ }
++
++ public boolean isEmpty() {
++ return this.size == 0;
++ }
++
++ public int size() {
++ return this.size;
++ }
++
++ private void resize() {
++ if (this.storage == EMPTY) {
++ this.storage = (E[])new Entity[DEFAULT_CAPACITY];
++ } else {
++ this.storage = Arrays.copyOf(this.storage, this.storage.length * 2);
++ }
++ }
++
++ public void add(final E entity) {
++ final int idx = this.size++;
++ if (idx >= this.storage.length) {
++ this.resize();
++ this.storage[idx] = entity;
++ } else {
++ this.storage[idx] = entity;
++ }
++ }
++
++ public int indexOf(final E entity) {
++ final E[] storage = this.storage;
++
++ for (int i = 0, len = Math.min(this.storage.length, this.size); i < len; ++i) {
++ if (storage[i] == entity) {
++ return i;
++ }
++ }
++
++ return -1;
++ }
++
++ public boolean remove(final E entity) {
++ final int idx = this.indexOf(entity);
++ if (idx == -1) {
++ return false;
++ }
++
++ final int size = --this.size;
++ final E[] storage = this.storage;
++ if (idx != size) {
++ System.arraycopy(storage, idx + 1, storage, idx, size - idx);
++ }
++
++ storage[size] = null;
++
++ return true;
++ }
++
++ public boolean has(final E entity) {
++ return this.indexOf(entity) != -1;
++ }
++ }
++
++ protected static final class EntityCollectionBySection {
++
++ protected final ChunkEntitySlices manager;
++ protected final long[] nonEmptyBitset;
++ protected final BasicEntityList[] entitiesBySection;
++ protected int count;
++
++ public EntityCollectionBySection(final ChunkEntitySlices manager) {
++ this.manager = manager;
++
++ final int sectionCount = manager.maxSection - manager.minSection + 1;
++
++ this.nonEmptyBitset = new long[(sectionCount + (Long.SIZE - 1)) >>> 6]; // (sectionCount + (Long.SIZE - 1)) / Long.SIZE
++ this.entitiesBySection = new BasicEntityList[sectionCount];
++ }
++
++ public void addEntity(final Entity entity, final int sectionIndex) {
++ BasicEntityList list = this.entitiesBySection[sectionIndex];
++
++ if (list != null && list.has(entity)) {
++ return;
++ }
++
++ if (list == null) {
++ this.entitiesBySection[sectionIndex] = list = new BasicEntityList<>();
++ this.nonEmptyBitset[sectionIndex >>> 6] |= (1L << (sectionIndex & (Long.SIZE - 1)));
++ }
++
++ list.add(entity);
++ ++this.count;
++ }
++
++ public void removeEntity(final Entity entity, final int sectionIndex) {
++ final BasicEntityList list = this.entitiesBySection[sectionIndex];
++
++ if (list == null || !list.remove(entity)) {
++ return;
++ }
++
++ --this.count;
++
++ if (list.isEmpty()) {
++ this.entitiesBySection[sectionIndex] = null;
++ this.nonEmptyBitset[sectionIndex >>> 6] ^= (1L << (sectionIndex & (Long.SIZE - 1)));
++ }
++ }
++
++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate super Entity> predicate) {
++ if (this.count == 0) {
++ return;
++ }
++
++ final int minSection = this.manager.minSection;
++ final int maxSection = this.manager.maxSection;
++
++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
++
++ final BasicEntityList[] entitiesBySection = this.entitiesBySection;
++
++ for (int section = min; section <= max; ++section) {
++ final BasicEntityList list = entitiesBySection[section - minSection];
++
++ if (list == null) {
++ continue;
++ }
++
++ final Entity[] storage = list.storage;
++
++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
++ final Entity entity = storage[i];
++
++ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
++ continue;
++ }
++
++ if (predicate != null && !predicate.test(entity)) {
++ continue;
++ }
++
++ into.add(entity);
++ }
++ }
++ }
++
++ public void getEntitiesWithEnderDragonParts(final Entity except, final AABB box, final List into,
++ final Predicate super Entity> predicate) {
++ if (this.count == 0) {
++ return;
++ }
++
++ final int minSection = this.manager.minSection;
++ final int maxSection = this.manager.maxSection;
++
++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
++
++ final BasicEntityList[] entitiesBySection = this.entitiesBySection;
++
++ for (int section = min; section <= max; ++section) {
++ final BasicEntityList list = entitiesBySection[section - minSection];
++
++ if (list == null) {
++ continue;
++ }
++
++ final Entity[] storage = list.storage;
++
++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
++ final Entity entity = storage[i];
++
++ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
++ continue;
++ }
++
++ if (predicate == null || predicate.test(entity)) {
++ into.add(entity);
++ } // else: continue to test the ender dragon parts
++
++ if (entity instanceof EnderDragon) {
++ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) {
++ if (part == except || !part.getBoundingBox().intersects(box)) {
++ continue;
++ }
++
++ if (predicate != null && !predicate.test(part)) {
++ continue;
++ }
++
++ into.add(part);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntitiesWithEnderDragonParts(final Entity except, final Class> clazz, final AABB box, final List into,
++ final Predicate super Entity> predicate) {
++ if (this.count == 0) {
++ return;
++ }
++
++ final int minSection = this.manager.minSection;
++ final int maxSection = this.manager.maxSection;
++
++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
++
++ final BasicEntityList[] entitiesBySection = this.entitiesBySection;
++
++ for (int section = min; section <= max; ++section) {
++ final BasicEntityList list = entitiesBySection[section - minSection];
++
++ if (list == null) {
++ continue;
++ }
++
++ final Entity[] storage = list.storage;
++
++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
++ final Entity entity = storage[i];
++
++ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
++ continue;
++ }
++
++ if (predicate == null || predicate.test(entity)) {
++ into.add(entity);
++ } // else: continue to test the ender dragon parts
++
++ if (entity instanceof EnderDragon) {
++ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) {
++ if (part == except || !part.getBoundingBox().intersects(box) || !clazz.isInstance(part)) {
++ continue;
++ }
++
++ if (predicate != null && !predicate.test(part)) {
++ continue;
++ }
++
++ into.add(part);
++ }
++ }
++ }
++ }
++ }
++
++ public void getEntities(final EntityType> type, final AABB box, final List super T> into,
++ final Predicate super T> predicate) {
++ if (this.count == 0) {
++ return;
++ }
++
++ final int minSection = this.manager.minSection;
++ final int maxSection = this.manager.maxSection;
++
++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
++
++ final BasicEntityList[] entitiesBySection = this.entitiesBySection;
++
++ for (int section = min; section <= max; ++section) {
++ final BasicEntityList list = entitiesBySection[section - minSection];
++
++ if (list == null) {
++ continue;
++ }
++
++ final Entity[] storage = list.storage;
++
++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
++ final Entity entity = storage[i];
++
++ if (entity == null || (type != null && entity.getType() != type) || !entity.getBoundingBox().intersects(box)) {
++ continue;
++ }
++
++ if (predicate != null && !predicate.test((T)entity)) {
++ continue;
++ }
++
++ into.add((T)entity);
++ }
++ }
++ }
++ }
++}
+diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java
+index 9b96d05094c3b83f6388d479fdca8800453ccd1d..b5b10c57401e1b27175b1960839a81382d89b73f 100644
+--- a/src/main/java/net/minecraft/network/Connection.java
++++ b/src/main/java/net/minecraft/network/Connection.java
+@@ -89,6 +89,28 @@ public class Connection extends SimpleChannelInboundHandler> {
+ private float averageSentPackets;
+ private int tickCount;
+ private boolean handlingFault;
++ // Paper start - add pending task queue
++ private final Queue pendingTasks = new java.util.concurrent.ConcurrentLinkedQueue<>();
++ public void execute(final Runnable run) {
++ if (this.channel == null || !this.channel.isRegistered()) {
++ run.run();
++ return;
++ }
++ final boolean queue = !this.queue.isEmpty();
++ if (!queue) {
++ this.channel.eventLoop().execute(run);
++ } else {
++ this.pendingTasks.add(run);
++ if (this.queue.isEmpty()) {
++ // something flushed async, dump tasks now
++ Runnable r;
++ while ((r = this.pendingTasks.poll()) != null) {
++ this.channel.eventLoop().execute(r);
++ }
++ }
++ }
++ }
++ // Paper end - add pending task queue
+
+ public Connection(PacketFlow side) {
+ this.receiving = side;
+@@ -247,6 +269,7 @@ public class Connection extends SimpleChannelInboundHandler