This commit is contained in:
NotMyFault 2021-05-30 23:49:46 +02:00
parent 53681ccc59
commit b599769f8c
No GPG key could be found for this signature
GPG key ID: 158F5701A6AAD00C
39 changed files with 56 additions and 4457 deletions

View File

@@ -100,6 +100,8 @@ dependencies {
testImplementation("org.checkerframework:checker-qual:3.13.0")
testImplementation("org.spigotmc:spigot-api:1.16.5-R0.1-SNAPSHOT") { isTransitive = true }
api("com.intellectualsites.paster:Paster:1.0.1-SNAPSHOT")
api("org.lz4:lz4-java:1.7.1")
api("net.jpountz:lz4-java-stream:1.0.0") { isTransitive = false }
// Third party
implementation("org.bstats:bstats-bukkit:2.2.1")
implementation("org.bstats:bstats-base:2.2.1")
@@ -165,6 +167,12 @@ tasks.named<ShadowJar>("shadowJar") {
relocate("com.github.luben", "com.boydti.fawe.zstd") {
include(dependency("com.github.luben:zstd-jni:1.4.9-5"))
}
relocate("net.jpountz", "com.boydti.fawe.jpountz") {
include(dependency("net.jpountz:lz4-java-stream:1.0.0"))
}
relocate("org.lz4", "com.boydti.fawe.lz4") {
include(dependency("org.lz4:lz4-java:1.7.1"))
}
}
}
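For context (not part of this commit), here is a minimal sketch of the block-stream API that the new org.lz4:lz4-java dependency provides; it is the same LZ4BlockOutputStream/LZ4BlockInputStream pair whose vendored copies are deleted further down. The class name and payload are illustrative:

import net.jpountz.lz4.LZ4BlockInputStream;
import net.jpountz.lz4.LZ4BlockOutputStream;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class Lz4RoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] original = "example payload".getBytes(StandardCharsets.UTF_8);

        // Compress into the LZ4 block format (64 KB default block size).
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (LZ4BlockOutputStream out = new LZ4BlockOutputStream(sink)) {
            out.write(original);
        }

        // Decompress the block stream back into the original bytes.
        byte[] restored = new byte[original.length];
        try (LZ4BlockInputStream in = new LZ4BlockInputStream(new ByteArrayInputStream(sink.toByteArray()))) {
            int read = 0;
            while (read < restored.length) {
                read += in.read(restored, read, restored.length - read);
            }
        }
        System.out.println(Arrays.equals(original, restored)); // true
    }
}

The relocate blocks above presumably exist so that the shaded FAWE jar does not clash with other plugins that bundle their own copy of these classes.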

View File

@@ -8,12 +8,12 @@ import com.boydti.fawe.config.Settings;
import com.boydti.fawe.object.collection.BitArray;
import com.boydti.fawe.util.MathMan;
import com.boydti.fawe.util.TaskManager;
import com.boydti.fawe.util.UnsafeUtility;
import com.mojang.datafixers.util.Either;
import com.sk89q.worldedit.math.BlockVector3;
import com.sk89q.worldedit.world.block.BlockState;
import com.sk89q.worldedit.world.block.BlockTypesCache;
import io.papermc.lib.PaperLib;
import net.jpountz.util.UnsafeUtils;
import net.minecraft.server.v1_15_R1.BiomeBase;
import net.minecraft.server.v1_15_R1.BiomeStorage;
import net.minecraft.server.v1_15_R1.Block;
@@ -101,7 +101,7 @@ public final class BukkitAdapter_1_15_2 extends NMSAdapter {
declaredSetLightNibbleArray.setAccessible(true);
methodSetLightNibbleArray = MethodHandles.lookup().unreflect(declaredSetLightNibbleArray);
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
fieldLock = DataPaletteBlock.class.getDeclaredField("j");
fieldLockOffset = unsafe.objectFieldOffset(fieldLock);
@@ -122,7 +122,7 @@ public final class BukkitAdapter_1_15_2 extends NMSAdapter {
protected static boolean setSectionAtomic(ChunkSection[] sections, ChunkSection expected, ChunkSection value, int layer) {
long offset = ((long) layer << CHUNKSECTION_SHIFT) + CHUNKSECTION_BASE;
if (layer >= 0 && layer < sections.length) {
return UnsafeUtils.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
return UnsafeUtility.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
}
return false;
}
@@ -131,7 +131,7 @@ public final class BukkitAdapter_1_15_2 extends NMSAdapter {
//todo there has to be a better way to do this. Maybe using a() in DataPaletteBlock which acquires the lock in NMS?
try {
synchronized (section) {
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
DataPaletteBlock<IBlockData> blocks = section.getBlocks();
ReentrantLock currentLock = (ReentrantLock) unsafe.getObject(blocks, fieldLockOffset);
if (currentLock instanceof DelegateLock) {
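The setSectionAtomic hunks swap a single ChunkSection[] slot with a raw compare-and-swap; CHUNKSECTION_BASE and CHUNKSECTION_SHIFT are presumably the array base offset and log2 element scale reported by Unsafe (their initialization sits outside these hunks). A hedged, self-contained sketch of that pattern against the new UnsafeUtility helper; the class name is illustrative:

import com.boydti.fawe.util.UnsafeUtility;

import sun.misc.Unsafe;

final class ArrayCasSketch {
    private static final Unsafe UNSAFE = UnsafeUtility.getUNSAFE();

    // Byte offset of element 0 and log2 of the per-element stride for Object[] slots.
    private static final long BASE = UNSAFE.arrayBaseOffset(Object[].class);
    private static final int SHIFT =
            31 - Integer.numberOfLeadingZeros(UNSAFE.arrayIndexScale(Object[].class));

    /** Atomically replace sections[layer], but only if it still holds the expected value. */
    static boolean casSlot(Object[] sections, int layer, Object expected, Object value) {
        long offset = ((long) layer << SHIFT) + BASE;
        // compareAndSwapObject is available on the JDK 8/11 Unsafe these adapters target.
        return UNSAFE.compareAndSwapObject(sections, offset, expected, value);
    }
}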

View File

@@ -8,12 +8,12 @@ import com.boydti.fawe.config.Settings;
import com.boydti.fawe.object.collection.BitArrayUnstretched;
import com.boydti.fawe.util.MathMan;
import com.boydti.fawe.util.TaskManager;
import com.boydti.fawe.util.UnsafeUtility;
import com.mojang.datafixers.util.Either;
import com.sk89q.worldedit.math.BlockVector3;
import com.sk89q.worldedit.world.block.BlockState;
import com.sk89q.worldedit.world.block.BlockTypesCache;
import io.papermc.lib.PaperLib;
import net.jpountz.util.UnsafeUtils;
import net.minecraft.server.v1_16_R1.BiomeBase;
import net.minecraft.server.v1_16_R1.BiomeStorage;
import net.minecraft.server.v1_16_R1.Block;
@@ -98,7 +98,7 @@ public final class BukkitAdapter_1_16_1 extends NMSAdapter {
declaredGetVisibleChunk.setAccessible(true);
methodGetVisibleChunk = MethodHandles.lookup().unreflect(declaredGetVisibleChunk);
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
fieldLock = DataPaletteBlock.class.getDeclaredField("j");
fieldLockOffset = unsafe.objectFieldOffset(fieldLock);
@@ -119,7 +119,7 @@ public final class BukkitAdapter_1_16_1 extends NMSAdapter {
protected static boolean setSectionAtomic(ChunkSection[] sections, ChunkSection expected, ChunkSection value, int layer) {
long offset = ((long) layer << CHUNKSECTION_SHIFT) + CHUNKSECTION_BASE;
if (layer >= 0 && layer < sections.length) {
return UnsafeUtils.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
return UnsafeUtility.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
}
return false;
}
@@ -128,7 +128,7 @@ public final class BukkitAdapter_1_16_1 extends NMSAdapter {
//todo there has to be a better way to do this. Maybe using a() in DataPaletteBlock which acquires the lock in NMS?
try {
synchronized (section) {
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
DataPaletteBlock<IBlockData> blocks = section.getBlocks();
ReentrantLock currentLock = (ReentrantLock) unsafe.getObject(blocks, fieldLockOffset);
if (currentLock instanceof DelegateLock) {

View File

@@ -8,12 +8,12 @@ import com.boydti.fawe.config.Settings;
import com.boydti.fawe.object.collection.BitArrayUnstretched;
import com.boydti.fawe.util.MathMan;
import com.boydti.fawe.util.TaskManager;
import com.boydti.fawe.util.UnsafeUtility;
import com.mojang.datafixers.util.Either;
import com.sk89q.worldedit.math.BlockVector3;
import com.sk89q.worldedit.world.block.BlockState;
import com.sk89q.worldedit.world.block.BlockTypesCache;
import io.papermc.lib.PaperLib;
import net.jpountz.util.UnsafeUtils;
import net.minecraft.server.v1_16_R2.BiomeBase;
import net.minecraft.server.v1_16_R2.BiomeStorage;
import net.minecraft.server.v1_16_R2.Block;
@@ -98,7 +98,7 @@ public final class BukkitAdapter_1_16_2 extends NMSAdapter {
declaredGetVisibleChunk.setAccessible(true);
methodGetVisibleChunk = MethodHandles.lookup().unreflect(declaredGetVisibleChunk);
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
fieldLock = DataPaletteBlock.class.getDeclaredField("j");
fieldLockOffset = unsafe.objectFieldOffset(fieldLock);
@@ -119,7 +119,7 @@ public final class BukkitAdapter_1_16_2 extends NMSAdapter {
protected static boolean setSectionAtomic(ChunkSection[] sections, ChunkSection expected, ChunkSection value, int layer) {
long offset = ((long) layer << CHUNKSECTION_SHIFT) + CHUNKSECTION_BASE;
if (layer >= 0 && layer < sections.length) {
return UnsafeUtils.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
return UnsafeUtility.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
}
return false;
}
@@ -128,7 +128,7 @@ public final class BukkitAdapter_1_16_2 extends NMSAdapter {
//todo there has to be a better way to do this. Maybe using a() in DataPaletteBlock which acquires the lock in NMS?
try {
synchronized (section) {
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
DataPaletteBlock<IBlockData> blocks = section.getBlocks();
ReentrantLock currentLock = (ReentrantLock) unsafe.getObject(blocks, fieldLockOffset);
if (currentLock instanceof DelegateLock) {

View File

@@ -8,12 +8,12 @@ import com.boydti.fawe.config.Settings;
import com.boydti.fawe.object.collection.BitArrayUnstretched;
import com.boydti.fawe.util.MathMan;
import com.boydti.fawe.util.TaskManager;
import com.boydti.fawe.util.UnsafeUtility;
import com.mojang.datafixers.util.Either;
import com.sk89q.worldedit.math.BlockVector3;
import com.sk89q.worldedit.world.block.BlockState;
import com.sk89q.worldedit.world.block.BlockTypesCache;
import io.papermc.lib.PaperLib;
import net.jpountz.util.UnsafeUtils;
import net.minecraft.server.v1_16_R3.BiomeBase;
import net.minecraft.server.v1_16_R3.BiomeStorage;
import net.minecraft.server.v1_16_R3.Block;
@@ -98,7 +98,7 @@ public final class BukkitAdapter_1_16_5 extends NMSAdapter {
declaredGetVisibleChunk.setAccessible(true);
methodGetVisibleChunk = MethodHandles.lookup().unreflect(declaredGetVisibleChunk);
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
fieldLock = DataPaletteBlock.class.getDeclaredField("j");
fieldLockOffset = unsafe.objectFieldOffset(fieldLock);
@@ -119,7 +119,7 @@ public final class BukkitAdapter_1_16_5 extends NMSAdapter {
protected static boolean setSectionAtomic(ChunkSection[] sections, ChunkSection expected, ChunkSection value, int layer) {
long offset = ((long) layer << CHUNKSECTION_SHIFT) + CHUNKSECTION_BASE;
if (layer >= 0 && layer < sections.length) {
return UnsafeUtils.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
return UnsafeUtility.getUNSAFE().compareAndSwapObject(sections, offset, expected, value);
}
return false;
}
@@ -128,7 +128,7 @@ public final class BukkitAdapter_1_16_5 extends NMSAdapter {
//todo there has to be a better way to do this. Maybe using a() in DataPaletteBlock which acquires the lock in NMS?
try {
synchronized (section) {
Unsafe unsafe = UnsafeUtils.getUNSAFE();
Unsafe unsafe = UnsafeUtility.getUNSAFE();
DataPaletteBlock<IBlockData> blocks = section.getBlocks();
ReentrantLock currentLock = (ReentrantLock) unsafe.getObject(blocks, fieldLockOffset);
if (currentLock instanceof DelegateLock) {

View File

@@ -1,4 +1,3 @@
import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar
import org.gradle.plugins.ide.idea.model.IdeaModel
plugins {
@@ -52,6 +51,8 @@ dependencies {
api("com.github.intellectualsites.plotsquared:PlotSquared-API:4.514") { isTransitive = false }
api("com.plotsquared:PlotSquared-Core:5.13.11") { isTransitive = false }
api("com.intellectualsites.paster:Paster:1.0.1-SNAPSHOT")
compileOnly("net.jpountz:lz4-java-stream:1.0.0") { isTransitive = false }
compileOnly("org.lz4:lz4-java:1.7.1")
}
tasks.named<Test>("test") {

View File

@@ -5,6 +5,7 @@ import com.boydti.fawe.config.Settings;
import com.boydti.fawe.jnbt.streamer.IntValueReader;
import com.boydti.fawe.object.IntTriple;
import com.boydti.fawe.util.MainUtil;
import com.boydti.fawe.util.UnsafeUtility;
import com.sk89q.jnbt.CompoundTag;
import com.sk89q.jnbt.IntTag;
import com.sk89q.jnbt.Tag;
@@ -22,7 +23,6 @@ import com.sk89q.worldedit.world.block.BaseBlock;
import com.sk89q.worldedit.world.block.BlockState;
import com.sk89q.worldedit.world.block.BlockStateHolder;
import com.sk89q.worldedit.world.block.BlockTypes;
import net.jpountz.util.UnsafeUtils;
import org.apache.logging.log4j.Logger;
import javax.annotation.Nullable;
@@ -279,7 +279,7 @@ public class DiskOptimizedClipboard extends LinearClipboard implements Closeable
if (cb == null || !cb.isDirect()) {
return;
}
UnsafeUtils.getUNSAFE().invokeCleaner(cb);
UnsafeUtility.getUNSAFE().invokeCleaner(cb);
}
@Override
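The invokeCleaner call above releases the clipboard's direct buffer memory eagerly instead of waiting for garbage collection. Unsafe.invokeCleaner exists on JDK 9+ and throws for non-direct buffers, hence the isDirect() guard. A minimal sketch with an illustrative class name:

import com.boydti.fawe.util.UnsafeUtility;

import java.nio.ByteBuffer;

final class DirectBufferCleanup {
    /** Free a direct buffer's native memory immediately; the buffer must not be used afterwards. */
    static void free(ByteBuffer buffer) {
        if (buffer == null || !buffer.isDirect()) {
            return;
        }
        UnsafeUtility.getUNSAFE().invokeCleaner(buffer);
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocateDirect(4096);
        free(buf);
    }
}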

View File

@@ -32,7 +32,6 @@ import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;
import net.jpountz.lz4.LZ4InputStream;
import net.jpountz.lz4.LZ4Utils;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
@@ -254,7 +253,7 @@ public class MainUtil {
private static final LZ4FastDecompressor DECOMPRESSOR = FACTORY.fastDecompressor();
public static int getMaxCompressedLength(int size) {
return LZ4Utils.maxCompressedLength(size);
return COMPRESSOR.maxCompressedLength(size);
}
public static int compress(byte[] bytes, int length, byte[] buffer, OutputStream out, Deflater deflate) throws IOException {
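The replacement above asks the compressor instance for the worst-case bound instead of the package-private LZ4Utils helper. In lz4-java that bound works out to length + length/255 + 16 (stated here from the library source, not from this diff). A small sketch:

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

final class CompressedBound {
    public static void main(String[] args) {
        LZ4Compressor compressor = LZ4Factory.fastestJavaInstance().fastCompressor();
        int size = 1_000_000;
        // Worst case for incompressible input: roughly 0.4% growth plus a small constant.
        System.out.println(compressor.maxCompressedLength(size)); // 1_000_000 + 3_921 + 16 = 1_003_937
    }
}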

View File

@@ -0,0 +1,27 @@
package com.boydti.fawe.util;
import sun.misc.Unsafe;
import java.lang.reflect.Field;
/**
* This is an internal class not meant to be used outside of the FAWE internals.
*/
public class UnsafeUtility {
private static final Unsafe UNSAFE;
static {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
UNSAFE = (Unsafe) theUnsafe.get(null);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw new ExceptionInInitializerError("Cannot access Unsafe");
}
}
public static Unsafe getUNSAFE() {
return UNSAFE;
}
}
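A hedged sketch of how the adapters earlier in this commit consume this helper: resolve a private field's offset once with objectFieldOffset, then read or atomically replace the field without per-access reflection (the fieldLockOffset / DelegateLock pattern). The Holder type and field name below are illustrative, not FAWE's real NMS types:

import com.boydti.fawe.util.UnsafeUtility;

import sun.misc.Unsafe;
import java.util.concurrent.locks.ReentrantLock;

final class FieldOffsetSketch {
    // Illustrative stand-in for an NMS DataPaletteBlock with its private lock field.
    static final class Holder {
        private ReentrantLock lock = new ReentrantLock();
    }

    private static final Unsafe UNSAFE = UnsafeUtility.getUNSAFE();
    private static final long LOCK_OFFSET;

    static {
        try {
            LOCK_OFFSET = UNSAFE.objectFieldOffset(Holder.class.getDeclaredField("lock"));
        } catch (NoSuchFieldException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    /** Plain read of the private field by offset. */
    static ReentrantLock readLock(Holder holder) {
        return (ReentrantLock) UNSAFE.getObject(holder, LOCK_OFFSET);
    }

    /** Atomic replacement, mirroring how a delegate lock could be installed. */
    static boolean swapLock(Holder holder, ReentrantLock expected, ReentrantLock replacement) {
        return UNSAFE.compareAndSwapObject(holder, LOCK_OFFSET, expected, replacement);
    }
}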

View File

@@ -1,242 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.SafeUtils;
import java.io.EOFException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Checksum;
import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_LEVEL_BASE;
import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_LZ4;
import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_RAW;
import static net.jpountz.lz4.LZ4BlockOutputStream.HEADER_LENGTH;
import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC;
import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC_LENGTH;
/**
* {@link InputStream} implementation to decode data written with
* {@link LZ4BlockOutputStream}. This class is not thread-safe and does not
* support {@link #mark(int)}/{@link #reset()}.
*
* @see LZ4BlockOutputStream
*/
public final class LZ4BlockInputStream extends FilterInputStream {
private final LZ4FastDecompressor decompressor;
private final Checksum checksum;
private byte[] buffer;
private byte[] compressedBuffer;
private int originalLen;
private int o;
private boolean finished;
/**
* Create a new {@link InputStream}.
*
* @param in the {@link InputStream} to poll
* @param decompressor the {@link LZ4FastDecompressor decompressor} instance to
* use
* @param checksum the {@link Checksum} instance to use, must be
* equivalent to the instance which has been used to
* write the stream
*/
public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Checksum checksum) {
super(in);
this.decompressor = decompressor;
this.checksum = checksum;
this.buffer = new byte[0];
this.compressedBuffer = new byte[HEADER_LENGTH];
o = originalLen = 0;
finished = false;
}
public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) {
this(in, decompressor, null);
}
/**
* Create a new instance which uses the fastest {@link LZ4FastDecompressor} available.
*
* @see LZ4Factory#fastestInstance()
* @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor)
*/
public LZ4BlockInputStream(InputStream in) {
this(in, LZ4Factory.fastestInstance().fastDecompressor());
}
@Override
public int available() throws IOException {
return originalLen - o;
}
@Override
public int read() throws IOException {
if (finished) {
return -1;
}
if (o == originalLen) {
refill();
}
if (finished) {
return -1;
}
return buffer[o++] & 0xFF;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
SafeUtils.checkRange(b, off, len);
if (finished) {
return -1;
}
if (o == originalLen) {
refill();
}
if (finished) {
return -1;
}
len = Math.min(len, originalLen - o);
System.arraycopy(buffer, o, b, off, len);
o += len;
return len;
}
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
@Override
public long skip(long n) throws IOException {
if (finished) {
return -1;
}
if (o == originalLen) {
refill();
}
if (finished) {
return -1;
}
final int skipped = (int) Math.min(n, originalLen - o);
o += skipped;
return skipped;
}
private void refill() throws IOException {
readFully(compressedBuffer, HEADER_LENGTH);
for (int i = 0; i < MAGIC_LENGTH; ++i) {
if (compressedBuffer[i] != MAGIC[i]) {
throw new IOException("Stream is corrupted");
}
}
final int token = compressedBuffer[MAGIC_LENGTH] & 0xFF;
final int compressionMethod = token & 0xF0;
final int compressionLevel = COMPRESSION_LEVEL_BASE + (token & 0x0F);
if (compressionMethod != COMPRESSION_METHOD_RAW && compressionMethod != COMPRESSION_METHOD_LZ4) {
throw new IOException("Stream is corrupted");
}
final int compressedLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 1);
originalLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 5);
final int check = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 9);
assert HEADER_LENGTH == MAGIC_LENGTH + 13;
if (originalLen > 1 << compressionLevel
|| originalLen < 0
|| compressedLen < 0
|| (originalLen == 0 && compressedLen != 0)
|| (originalLen != 0 && compressedLen == 0)
|| (compressionMethod == COMPRESSION_METHOD_RAW && originalLen != compressedLen)) {
throw new IOException("Stream is corrupted");
}
if (originalLen == 0 && compressedLen == 0) {
if (check != 0) {
throw new IOException("Stream is corrupted");
}
finished = true;
return;
}
if (buffer.length < originalLen) {
buffer = new byte[Math.max(originalLen, buffer.length * 3 / 2)];
}
switch (compressionMethod) {
case COMPRESSION_METHOD_RAW:
readFully(buffer, originalLen);
break;
case COMPRESSION_METHOD_LZ4:
if (compressedBuffer.length < originalLen) {
compressedBuffer = new byte[Math.max(compressedLen, compressedBuffer.length * 3 / 2)];
}
readFully(compressedBuffer, compressedLen);
try {
final int compressedLen2 = decompressor.decompress(compressedBuffer, 0, buffer, 0, originalLen);
if (compressedLen != compressedLen2) {
throw new IOException("Stream is corrupted");
}
} catch (LZ4Exception e) {
throw new IOException("Stream is corrupted", e);
}
break;
default:
throw new AssertionError();
}
if (checksum != null) {
checksum.reset();
checksum.update(buffer, 0, originalLen);
if ((int) checksum.getValue() != check) {
throw new IOException("Stream is corrupted");
}
}
o = 0;
}
private void readFully(byte[] b, int len) throws IOException {
int read = 0;
while (read < len) {
final int r = in.read(b, read, len - read);
if (r < 0) {
throw new EOFException("Stream ended prematurely");
}
read += r;
}
assert len == read;
}
@Override
public boolean markSupported() {
return false;
}
@SuppressWarnings("sync-override")
@Override
public void mark(int readlimit) {
// unsupported
}
@SuppressWarnings("sync-override")
@Override
public void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
@Override
public String toString() {
return getClass().getSimpleName() + "(in=" + in
+ ", decompressor=" + decompressor + ", checksum=" + checksum + ")";
}
}

View File

@@ -1,280 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file has been modified for use in the FAWE project.
*/
import net.jpountz.util.SafeUtils;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Checksum;
/**
* Streaming LZ4 (not compatible with the LZ4 Frame format).
* This class compresses data into fixed-size blocks of compressed data.
* This class uses its own format and is not compatible with the LZ4 Frame format.
* @see LZ4BlockInputStream
*/
public final class LZ4BlockOutputStream extends FilterOutputStream {
static final byte[] MAGIC = new byte[]{'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k'};
static final int MAGIC_LENGTH = MAGIC.length;
static final int HEADER_LENGTH =
MAGIC_LENGTH // magic bytes
+ 1 // token
+ 4 // compressed length
+ 4 // decompressed length
+ 4; // checksum
static final int COMPRESSION_LEVEL_BASE = 10;
static final int MIN_BLOCK_SIZE = 64;
static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F);
static final int COMPRESSION_METHOD_RAW = 0x10;
static final int COMPRESSION_METHOD_LZ4 = 0x20;
static final int DEFAULT_SEED = 0x9747b28c;
private static int compressionLevel(int blockSize) {
if (blockSize < MIN_BLOCK_SIZE) {
throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize);
} else if (blockSize > MAX_BLOCK_SIZE) {
throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize);
}
int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2
assert (1 << compressionLevel) >= blockSize;
assert blockSize * 2 > (1 << compressionLevel);
compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE);
assert compressionLevel >= 0 && compressionLevel <= 0x0F;
return compressionLevel;
}
private final int blockSize;
private final int compressionLevel;
private final LZ4Compressor compressor;
private final Checksum checksum;
private final byte[] buffer;
private final byte[] compressedBuffer;
private final boolean syncFlush;
private boolean finished;
private int o;
/**
* Creates a new {@link OutputStream} with configurable block size. Large
* blocks require more memory at compression and decompression time but
* should improve the compression ratio.
*
* @param out the {@link OutputStream} to feed
* @param blockSize the maximum number of bytes to try to compress at once,
* must be &gt;= 64 and &lt;= 32 M
* @param compressor the {@link LZ4Compressor} instance to use to compress
* data
* @param checksum the {@link Checksum} instance to use to check data for
* integrity.
* @param syncFlush true if pending data should also be flushed on {@link #flush()}
*/
public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) {
super(out);
this.blockSize = blockSize;
this.compressor = compressor;
this.checksum = checksum;
this.compressionLevel = compressionLevel(blockSize);
this.buffer = new byte[blockSize];
final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize);
this.compressedBuffer = new byte[compressedBlockSize];
this.syncFlush = syncFlush;
o = 0;
finished = false;
System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH);
}
/**
* Creates a new instance which checks stream integrity and doesn't sync flush.
*
* @param out the {@link OutputStream} to feed
* @param blockSize the maximum number of bytes to try to compress at once,
* must be &gt;= 64 and &lt;= 32 M
* @param compressor the {@link LZ4Compressor} instance to use to compress
* data
*
* @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean)
*/
public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) {
this(out, blockSize, compressor, null, false);
}
/**
* Creates a new instance which compresses with the standard LZ4 compression
* algorithm.
*
* @param out the {@link OutputStream} to feed
* @param blockSize the maximum number of bytes to try to compress at once,
* must be &gt;= 64 and &lt;= 32 M
*
* @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor)
* @see LZ4Factory#fastCompressor()
*/
public LZ4BlockOutputStream(OutputStream out, int blockSize) {
this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor());
}
/**
* Creates a new instance which compresses into blocks of 64 KB.
*
* @param out the {@link OutputStream} to feed
*
* @see #LZ4BlockOutputStream(OutputStream, int)
*/
public LZ4BlockOutputStream(OutputStream out) {
this(out, 1 << 16);
}
private void ensureNotFinished() {
if (finished) {
throw new IllegalStateException("This stream is already closed");
}
}
@Override
public void write(int b) throws IOException {
ensureNotFinished();
if (o == blockSize) {
flushBufferedData();
}
buffer[o++] = (byte) b;
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
SafeUtils.checkRange(b, off, len);
ensureNotFinished();
while (o + len > blockSize) {
final int l = blockSize - o;
System.arraycopy(b, off, buffer, o, blockSize - o);
o = blockSize;
flushBufferedData();
off += l;
len -= l;
}
System.arraycopy(b, off, buffer, o, len);
o += len;
}
@Override
public void write(byte[] b) throws IOException {
ensureNotFinished();
write(b, 0, b.length);
}
@Override
public void close() throws IOException {
if (!finished) {
finish();
}
if (out != null) {
out.close();
out = null;
}
}
private void flushBufferedData() throws IOException {
if (o == 0) {
return;
}
final int check;
if (checksum != null) {
checksum.reset();
checksum.update(buffer, 0, o);
check = (int) checksum.getValue();
} else {
check = 1;
}
int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH);
final int compressMethod;
if (compressedLength >= o) {
compressMethod = COMPRESSION_METHOD_RAW;
compressedLength = o;
System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o);
} else {
compressMethod = COMPRESSION_METHOD_LZ4;
}
compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel);
writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1);
writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5);
writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9);
assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength);
o = 0;
}
/**
* Flushes this compressed {@link OutputStream}.
*
* If the stream has been created with <code>syncFlush=true</code>, pending
* data will be compressed and appended to the underlying {@link OutputStream}
* before calling {@link OutputStream#flush()} on the underlying stream.
* Otherwise, this method just flushes the underlying stream, so pending
* data might not be available for reading until {@link #finish()} or
* {@link #close()} is called.
*/
@Override
public void flush() throws IOException {
if (out != null) {
if (syncFlush) {
flushBufferedData();
}
out.flush();
}
}
/**
* Same as {@link #close()} except that it doesn't close the underlying stream.
* This can be useful if you want to keep on using the underlying stream.
*
* @throws IOException if an I/O error occurs.
*/
public void finish() throws IOException {
ensureNotFinished();
flushBufferedData();
compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel);
writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1);
writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5);
writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9);
assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
out.write(compressedBuffer, 0, HEADER_LENGTH);
finished = true;
out.flush();
}
private static void writeIntLE(int i, byte[] buf, int off) {
buf[off++] = (byte) i;
buf[off++] = (byte) (i >>> 8);
buf[off++] = (byte) (i >>> 16);
buf[off++] = (byte) (i >>> 24);
}
@Override
public String toString() {
return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize
+ ", compressor=" + compressor + ", checksum=" + checksum + ")";
}
}
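To make the header arithmetic above concrete: each block starts with the 8 magic bytes, one token byte carrying the compression method in the high nibble and the compression level in the low nibble, then three little-endian ints (compressed length, original length, checksum), which is exactly the MAGIC_LENGTH + 13 == HEADER_LENGTH assertion. A small worked sketch (names illustrative) of how the 64 KB default block size maps to level 6 and how the reader's size check in refill() follows from the token:

final class BlockTokenSketch {
    static final int COMPRESSION_LEVEL_BASE = 10;
    static final int COMPRESSION_METHOD_LZ4 = 0x20;

    // Same ceil(log2(blockSize)) minus base computation as compressionLevel(int) above.
    static int compressionLevel(int blockSize) {
        int level = 32 - Integer.numberOfLeadingZeros(blockSize - 1);
        return Math.max(0, level - COMPRESSION_LEVEL_BASE);
    }

    public static void main(String[] args) {
        int blockSize = 1 << 16;                     // 64 KB, the default used by LZ4BlockOutputStream(out)
        int level = compressionLevel(blockSize);     // 6
        int token = COMPRESSION_METHOD_LZ4 | level;  // 0x26
        int maxOriginalLen = 1 << (COMPRESSION_LEVEL_BASE + (token & 0x0F));
        // The reader rejects any block whose decoded length claims more than this.
        System.out.println(level + " 0x" + Integer.toHexString(token) + " " + maxOriginalLen); // 6 0x26 65536
    }
}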

View File

@@ -1,238 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import static net.jpountz.lz4.LZ4Constants.COPY_LENGTH;
import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
import static net.jpountz.lz4.LZ4Constants.ML_BITS;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
import static net.jpountz.util.ByteBufferUtils.readByte;
import static net.jpountz.util.ByteBufferUtils.readInt;
import static net.jpountz.util.ByteBufferUtils.readLong;
import static net.jpountz.util.ByteBufferUtils.writeByte;
import static net.jpountz.util.ByteBufferUtils.writeInt;
import static net.jpountz.util.ByteBufferUtils.writeLong;
enum LZ4ByteBufferUtils {
;
static int hash(ByteBuffer buf, int i) {
return LZ4Utils.hash(readInt(buf, i));
}
static int hash64k(ByteBuffer buf, int i) {
return LZ4Utils.hash64k(readInt(buf, i));
}
static boolean readIntEquals(ByteBuffer buf, int i, int j) {
return buf.getInt(i) == buf.getInt(j);
}
static void safeIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchLen) {
for (int i = 0; i < matchLen; ++i) {
dest.put(dOff + i, dest.get(matchOff + i));
}
}
static void wildIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchCopyEnd) {
if (dOff - matchOff < 4) {
for (int i = 0; i < 4; ++i) {
writeByte(dest, dOff + i, readByte(dest, matchOff + i));
}
dOff += 4;
matchOff += 4;
int dec = 0;
assert dOff >= matchOff && dOff - matchOff < 8;
switch (dOff - matchOff) {
case 1:
matchOff -= 3;
break;
case 2:
matchOff -= 2;
break;
case 3:
matchOff -= 3;
dec = -1;
break;
case 5:
dec = 1;
break;
case 6:
dec = 2;
break;
case 7:
dec = 3;
break;
default:
break;
}
writeInt(dest, dOff, readInt(dest, matchOff));
dOff += 4;
matchOff -= dec;
} else if (dOff - matchOff < COPY_LENGTH) {
writeLong(dest, dOff, readLong(dest, matchOff));
dOff += dOff - matchOff;
}
while (dOff < matchCopyEnd) {
writeLong(dest, dOff, readLong(dest, matchOff));
dOff += 8;
matchOff += 8;
}
}
static int commonBytes(ByteBuffer src, int ref, int sOff, int srcLimit) {
int matchLen = 0;
while (sOff <= srcLimit - 8) {
if (readLong(src, sOff) == readLong(src, ref)) {
matchLen += 8;
ref += 8;
sOff += 8;
} else {
final int zeroBits;
if (src.order() == ByteOrder.BIG_ENDIAN) {
zeroBits = Long.numberOfLeadingZeros(readLong(src, sOff) ^ readLong(src, ref));
} else {
zeroBits = Long.numberOfTrailingZeros(readLong(src, sOff) ^ readLong(src, ref));
}
return matchLen + (zeroBits >>> 3);
}
}
while (sOff < srcLimit && readByte(src, ref++) == readByte(src, sOff++)) {
++matchLen;
}
return matchLen;
}
static int commonBytesBackward(ByteBuffer b, int o1, int o2, int l1, int l2) {
int count = 0;
while (o1 > l1 && o2 > l2 && b.get(--o1) == b.get(--o2)) {
++count;
}
return count;
}
static void safeArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) {
for (int i = 0; i < len; ++i) {
dest.put(dOff + i, src.get(sOff + i));
}
}
static void wildArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) {
assert src.order().equals(dest.order());
try {
for (int i = 0; i < len; i += 8) {
dest.putLong(dOff + i, src.getLong(sOff + i));
}
} catch (IndexOutOfBoundsException e) {
throw new LZ4Exception("Malformed input at offset " + sOff);
}
}
static int encodeSequence(ByteBuffer src, int anchor, int matchOff, int matchRef, int matchLen, ByteBuffer dest, int dOff, int destEnd) {
final int runLen = matchOff - anchor;
final int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
int token;
if (runLen >= RUN_MASK) {
token = (byte) (RUN_MASK << ML_BITS);
dOff = writeLen(runLen - RUN_MASK, dest, dOff);
} else {
token = runLen << ML_BITS;
}
// copy literals
wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
// encode offset
final int matchDec = matchOff - matchRef;
dest.put(dOff++, (byte) matchDec);
dest.put(dOff++, (byte) (matchDec >>> 8));
// encode match len
matchLen -= 4;
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (matchLen >= ML_MASK) {
token |= ML_MASK;
dOff = writeLen(matchLen - RUN_MASK, dest, dOff);
} else {
token |= matchLen;
}
dest.put(tokenOff, (byte) token);
return dOff;
}
static int lastLiterals(ByteBuffer src, int sOff, int srcLen, ByteBuffer dest, int dOff, int destEnd) {
final int runLen = srcLen;
if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) {
throw new LZ4Exception();
}
if (runLen >= RUN_MASK) {
dest.put(dOff++, (byte) (RUN_MASK << ML_BITS));
dOff = writeLen(runLen - RUN_MASK, dest, dOff);
} else {
dest.put(dOff++, (byte) (runLen << ML_BITS));
}
// copy literals
safeArraycopy(src, sOff, dest, dOff, runLen);
dOff += runLen;
return dOff;
}
static int writeLen(int len, ByteBuffer dest, int dOff) {
while (len >= 0xFF) {
dest.put(dOff++, (byte) 0xFF);
len -= 0xFF;
}
dest.put(dOff++, (byte) len);
return dOff;
}
static class Match {
int start, ref, len;
void fix(int correction) {
start += correction;
ref += correction;
len -= correction;
}
int end() {
return start + len;
}
}
static void copyTo(Match m1, Match m2) {
m2.len = m1.len;
m2.start = m1.start;
m2.ref = m1.ref;
}
}
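The commonBytes method above compares eight bytes at a time and, on the first mismatch, turns the XOR's trailing (or leading, for big-endian buffers) zero bits into a count of matching bytes via zeroBits >>> 3. A tiny worked sketch of that step for the little-endian case; the values are illustrative:

final class CommonBytesSketch {
    /** Number of equal low-order (little-endian leading) bytes of two longs. */
    static int commonLeadingBytesLE(long a, long b) {
        if (a == b) {
            return 8;
        }
        // Every matching byte contributes 8 zero bits at the low end of the XOR.
        return Long.numberOfTrailingZeros(a ^ b) >>> 3;
    }

    public static void main(String[] args) {
        long a = 0x11_22_33_44_55_66_77_88L;
        long b = 0x11_FF_33_44_55_66_77_88L; // differs only in memory byte index 6 (little-endian)
        System.out.println(commonLeadingBytesLE(a, b)); // 6
    }
}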

View File

@@ -1,128 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* LZ4 compressor.
* <p>
* Instances of this class are thread-safe.
*/
public abstract class LZ4Compressor {
/**
* Return the maximum compressed length for an input of size <code>length</code>.
*/
@SuppressWarnings("static-method")
public final int maxCompressedLength(int length) {
return LZ4Utils.maxCompressedLength(length);
}
/**
* Compress <code>src[srcOff:srcOff+srcLen]</code> into
* <code>dest[destOff:destOff+destLen]</code> and return the compressed
* length.
* <p>
* This method will throw a {@link LZ4Exception} if this compressor is unable
* to compress the input into less than <code>maxDestLen</code> bytes. To
* prevent this exception to be thrown, you should make sure that
* <code>maxDestLen >= maxCompressedLength(srcLen)</code>.
*
* @return the compressed size
* @throws LZ4Exception if maxDestLen is too small
*/
public abstract int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
/**
* Compress <code>src[srcOff:srcOff+srcLen]</code> into
* <code>dest[destOff:destOff+destLen]</code> and return the compressed
* length.
* <p>
* This method will throw a {@link LZ4Exception} if this compressor is unable
* to compress the input into less than <code>maxDestLen</code> bytes. To
* prevent this exception to be thrown, you should make sure that
* <code>maxDestLen >= maxCompressedLength(srcLen)</code>.
* <p>
* {@link ByteBuffer} positions remain unchanged.
*
* @return the compressed size
* @throws LZ4Exception if maxDestLen is too small
*/
public abstract int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen);
/**
* Convenience method, equivalent to calling
* {@link #compress(byte[], int, int, byte[], int, int) compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
*/
public final int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
return compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
}
/**
* Convenience method, equivalent to calling
* {@link #compress(byte[], int, int, byte[], int) compress(src, 0, src.length, dest, 0)}.
*/
public final int compress(byte[] src, byte[] dest) {
return compress(src, 0, src.length, dest, 0);
}
/**
* Convenience method which returns <code>src[srcOff:srcOff+srcLen]</code>
* compressed.
* <p><b><span style="color:red">Warning</span></b>: this method has an
* important overhead due to the fact that it needs to allocate a buffer to
* compress into, and then needs to resize this buffer to the actual
* compressed length.</p>
* <p>Here is how this method is implemented:</p>
* <pre>
* final int maxCompressedLength = maxCompressedLength(srcLen);
* final byte[] compressed = new byte[maxCompressedLength];
* final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
* return Arrays.copyOf(compressed, compressedLength);
* </pre>
*/
public final byte[] compress(byte[] src, int srcOff, int srcLen) {
final int maxCompressedLength = maxCompressedLength(srcLen);
final byte[] compressed = new byte[maxCompressedLength];
final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
return Arrays.copyOf(compressed, compressedLength);
}
/**
* Convenience method, equivalent to calling
* {@link #compress(byte[], int, int) compress(src, 0, src.length)}.
*/
public final byte[] compress(byte[] src) {
return compress(src, 0, src.length);
}
/**
* Compress <code>src</code> into <code>dest</code>. Calling this method
* will update the positions of both {@link ByteBuffer}s.
*/
public final void compress(ByteBuffer src, ByteBuffer dest) {
final int cpLen = compress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining());
src.position(src.limit());
dest.position(dest.position() + cpLen);
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}

View File

@@ -1,53 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
enum LZ4Constants {
;
static final int DEFAULT_COMPRESSION_LEVEL = 8 + 1;
static final int MAX_COMPRESSION_LEVEL = 16 + 1;
static final int MEMORY_USAGE = 14;
static final int NOT_COMPRESSIBLE_DETECTION_LEVEL = 6;
static final int MIN_MATCH = 4;
static final int HASH_LOG = MEMORY_USAGE - 2;
static final int HASH_TABLE_SIZE = 1 << HASH_LOG;
static final int SKIP_STRENGTH = Math.max(NOT_COMPRESSIBLE_DETECTION_LEVEL, 2);
static final int COPY_LENGTH = 8;
static final int LAST_LITERALS = 5;
static final int MF_LIMIT = COPY_LENGTH + MIN_MATCH;
static final int MIN_LENGTH = MF_LIMIT + 1;
static final int MAX_DISTANCE = 1 << 16;
static final int ML_BITS = 4;
static final int ML_MASK = (1 << ML_BITS) - 1;
static final int RUN_BITS = 8 - ML_BITS;
static final int RUN_MASK = (1 << RUN_BITS) - 1;
static final int LZ4_64K_LIMIT = (1 << 16) + (MF_LIMIT - 1);
static final int HASH_LOG_64K = HASH_LOG + 1;
static final int HASH_TABLE_SIZE_64K = 1 << HASH_LOG_64K;
static final int HASH_LOG_HC = 15;
static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC;
static final int OPTIMAL_ML = ML_MASK - 1 + MIN_MATCH;
}

View File

@@ -1,25 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @deprecated Use {@link LZ4FastDecompressor} instead.
*/
@Deprecated
public interface LZ4Decompressor {
int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
}

View File

@@ -1,36 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* LZ4 compression or decompression error.
*/
public class LZ4Exception extends RuntimeException {
private static final long serialVersionUID = 1L;
public LZ4Exception(String msg, Throwable t) {
super(msg, t);
}
public LZ4Exception(String msg) {
super(msg);
}
public LZ4Exception() {
super();
}
}

View File

@@ -1,272 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.Native;
import net.jpountz.util.Utils;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays;
import static net.jpountz.lz4.LZ4Constants.DEFAULT_COMPRESSION_LEVEL;
import static net.jpountz.lz4.LZ4Constants.MAX_COMPRESSION_LEVEL;
/**
* Entry point for the LZ4 API.
* <p>
* This class has 3 instances<ul>
* <li>a {@link #nativeInstance() native} instance which is a JNI binding to
* <a href="http://code.google.com/p/lz4/">the original LZ4 C implementation</a>.
* <li>a {@link #safeInstance() safe Java} instance which is a pure Java port
* of the original C library,</li>
* <li>an {@link #unsafeInstance() unsafe Java} instance which is a Java port
* using the unofficial {@link sun.misc.Unsafe} API.
* </ul>
* <p>
* Only the {@link #safeInstance() safe instance} is guaranteed to work on your
* JVM, as a consequence it is advised to use the {@link #fastestInstance()} or
* {@link #fastestJavaInstance()} to pull a {@link LZ4Factory} instance.
* <p>
* All methods from this class are very costly, so you should get an instance
* once, and then reuse it whenever possible. This is typically done by storing
* a {@link LZ4Factory} instance in a static field.
*/
public final class LZ4Factory {
private static LZ4Factory instance(String impl) {
try {
return new LZ4Factory(impl);
} catch (Exception e) {
throw new AssertionError(e);
}
}
private static LZ4Factory NATIVE_INSTANCE,
JAVA_UNSAFE_INSTANCE,
JAVA_SAFE_INSTANCE;
/**
* Return a {@link LZ4Factory} instance that returns compressors and
* decompressors that are native bindings to the original C library.
* <p>
* Please note that this instance has some traps you should be aware of:<ol>
* <li>Upon loading this instance, files will be written to the temporary
* directory of the system. Although these files are supposed to be deleted
* when the JVM exits, they might remain on systems that don't support
* removal of files being used such as Windows.
* <li>The instance can only be loaded once per JVM. This can be a problem
* if your application uses multiple class loaders (such as most servlet
* containers): this instance will only be available to the children of the
* class loader which has loaded it. As a consequence, it is advised to
* either not use this instance in webapps or to put this library in the lib
* directory of your servlet container so that it is loaded by the system
* class loader.
* </ol>
*/
public static synchronized LZ4Factory nativeInstance() {
if (NATIVE_INSTANCE == null) {
NATIVE_INSTANCE = instance("JNI");
}
return NATIVE_INSTANCE;
}
/**
* Return a {@link LZ4Factory} instance that returns compressors and
* decompressors that are written with Java's official API.
*/
public static synchronized LZ4Factory safeInstance() {
if (JAVA_SAFE_INSTANCE == null) {
JAVA_SAFE_INSTANCE = instance("JavaSafe");
}
return JAVA_SAFE_INSTANCE;
}
/**
* Return a {@link LZ4Factory} instance that returns compressors and
* decompressors that may use {@link sun.misc.Unsafe} to speed up compression
* and decompression.
*/
public static synchronized LZ4Factory unsafeInstance() {
if (JAVA_UNSAFE_INSTANCE == null) {
JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe");
}
return JAVA_UNSAFE_INSTANCE;
}
/**
* Return the fastest available {@link LZ4Factory} instance which does not
* rely on JNI bindings. It first tries to load the
* {@link #unsafeInstance() unsafe instance}, and then the
* {@link #safeInstance() safe Java instance} if the JVM doesn't have a
* working {@link sun.misc.Unsafe}.
*/
public static LZ4Factory fastestJavaInstance() {
if (Utils.isUnalignedAccessAllowed()) {
try {
return unsafeInstance();
} catch (Throwable t) {
return safeInstance();
}
} else {
return safeInstance();
}
}
/**
* Return the fastest available {@link LZ4Factory} instance. If the class
* loader is the system class loader and if the
* {@link #nativeInstance() native instance} loads successfully, then the
* {@link #nativeInstance() native instance} is returned, otherwise the
* {@link #fastestJavaInstance() fastest Java instance} is returned.
* <p>
* Please read {@link #nativeInstance() javadocs of nativeInstance()} before
* using this method.
*/
public static LZ4Factory fastestInstance() {
if (Native.isLoaded()
|| Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
try {
return nativeInstance();
} catch (Throwable t) {
return fastestJavaInstance();
}
} else {
return fastestJavaInstance();
}
}
@SuppressWarnings("unchecked")
private static <T> T classInstance(String cls) throws NoSuchFieldException, SecurityException, ClassNotFoundException, IllegalArgumentException, IllegalAccessException {
ClassLoader loader = LZ4Factory.class.getClassLoader();
loader = loader == null ? ClassLoader.getSystemClassLoader() : loader;
final Class<?> c = loader.loadClass(cls);
Field f = c.getField("INSTANCE");
return (T) f.get(null);
}
private final String impl;
private final LZ4Compressor fastCompressor;
private final LZ4Compressor highCompressor;
private final LZ4FastDecompressor fastDecompressor;
private final LZ4SafeDecompressor safeDecompressor;
private final LZ4Compressor[] highCompressors = new LZ4Compressor[MAX_COMPRESSION_LEVEL + 1];
private LZ4Factory(String impl) throws ClassNotFoundException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InstantiationException, InvocationTargetException {
this.impl = impl;
fastCompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "Compressor");
highCompressor = classInstance("net.jpountz.lz4.LZ4HC" + impl + "Compressor");
fastDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "FastDecompressor");
safeDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "SafeDecompressor");
Constructor<? extends LZ4Compressor> highConstructor = highCompressor.getClass().getDeclaredConstructor(int.class);
highCompressors[DEFAULT_COMPRESSION_LEVEL] = highCompressor;
for (int level = 1; level <= MAX_COMPRESSION_LEVEL; level++) {
if (level == DEFAULT_COMPRESSION_LEVEL) continue;
highCompressors[level] = highConstructor.newInstance(level);
}
// quickly test that everything works as expected
final byte[] original = new byte[]{'a', 'b', 'c', 'd', ' ', ' ', ' ', ' ', ' ', ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'};
for (LZ4Compressor compressor : Arrays.asList(fastCompressor, highCompressor)) {
final int maxCompressedLength = compressor.maxCompressedLength(original.length);
final byte[] compressed = new byte[maxCompressedLength];
final int compressedLength = compressor.compress(original, 0, original.length, compressed, 0, maxCompressedLength);
final byte[] restored = new byte[original.length];
fastDecompressor.decompress(compressed, 0, restored, 0, original.length);
if (!Arrays.equals(original, restored)) {
throw new AssertionError();
}
Arrays.fill(restored, (byte) 0);
final int decompressedLength = safeDecompressor.decompress(compressed, 0, compressedLength, restored, 0);
if (decompressedLength != original.length || !Arrays.equals(original, restored)) {
throw new AssertionError();
}
}
}
/**
* Return a blazing fast {@link LZ4Compressor}.
*/
public LZ4Compressor fastCompressor() {
return fastCompressor;
}
/**
* Return a {@link LZ4Compressor} which requires more memory than
* {@link #fastCompressor()} and is slower but compresses more efficiently.
*/
public LZ4Compressor highCompressor() {
return highCompressor;
}
/**
* Return a {@link LZ4Compressor} which requires more memory than
* {@link #fastCompressor()} and is slower but compresses more efficiently.
* The compression level can be customized.
* <p>For current implementations, the following is true about compression level:<ol>
* <li>It should be in range [1, 17]</li>
* <li>A compression level higher than 17 would be treated as 17.</li>
* <li>A compression level lower than 1 would be treated as 9.</li>
* </ol></p>
*/
public LZ4Compressor highCompressor(int compressionLevel) {
if (compressionLevel > MAX_COMPRESSION_LEVEL) {
compressionLevel = MAX_COMPRESSION_LEVEL;
} else if (compressionLevel < 1) {
compressionLevel = DEFAULT_COMPRESSION_LEVEL;
}
return highCompressors[compressionLevel];
}
/**
* Return a {@link LZ4FastDecompressor} instance.
*/
public LZ4FastDecompressor fastDecompressor() {
return fastDecompressor;
}
/**
* Return a {@link LZ4SafeDecompressor} instance.
*/
public LZ4SafeDecompressor safeDecompressor() {
return safeDecompressor;
}
/**
* Return a {@link LZ4UnknownSizeDecompressor} instance.
*
* @deprecated use {@link #safeDecompressor()}
*/
public LZ4UnknownSizeDecompressor unknownSizeDecompressor() {
return safeDecompressor();
}
/**
* Return a {@link LZ4Decompressor} instance.
*
* @deprecated use {@link #fastDecompressor()}
*/
public LZ4Decompressor decompressor() {
return fastDecompressor();
}
@Override
public String toString() {
return getClass().getSimpleName() + ":" + impl;
}
}

View File

@@ -1,108 +0,0 @@
package net.jpountz.lz4;
import java.nio.ByteBuffer;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* LZ4 decompressor that requires the size of the original input to be known.
* Use {@link LZ4SafeDecompressor} if you only know the size of the
* compressed stream.
* <p>
* Instances of this class are thread-safe.
*/
public abstract class LZ4FastDecompressor implements LZ4Decompressor {
/**
* Decompress <code>src[srcOff:]</code> into <code>dest[destOff:destOff+destLen]</code>
* and return the number of bytes read from <code>src</code>.
* <code>destLen</code> must be exactly the size of the decompressed data.
*
* @param destLen the <b>exact</b> size of the original input
* @return the number of bytes read to restore the original input
*/
public abstract int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
/**
* Decompress <code>src[srcOff:]</code> into <code>dest[destOff:destOff+destLen]</code>
* and return the number of bytes read from <code>src</code>.
* <code>destLen</code> must be exactly the size of the decompressed data.
* The positions and limits of the {@link ByteBuffer}s remain unchanged.
*
* @param destLen the <b>exact</b> size of the original input
* @return the number of bytes read to restore the original input
*/
public abstract int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen);
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], int, byte[], int, int) decompress(src, 0, dest, 0, destLen)}.
*/
public final int decompress(byte[] src, byte[] dest, int destLen) {
return decompress(src, 0, dest, 0, destLen);
}
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], byte[], int) decompress(src, dest, dest.length)}.
*/
public final int decompress(byte[] src, byte[] dest) {
return decompress(src, dest, dest.length);
}
/**
* Convenience method which returns <code>src[srcOff:?]</code>
* decompressed.
* <p><b><span style="color:red">Warning</span></b>: this method has an
* important overhead due to the fact that it needs to allocate a buffer to
* decompress into.</p>
* <p>Here is how this method is implemented:</p>
* <pre>
* final byte[] decompressed = new byte[destLen];
* decompress(src, srcOff, decompressed, 0, destLen);
* return decompressed;
* </pre>
*/
public final byte[] decompress(byte[] src, int srcOff, int destLen) {
final byte[] decompressed = new byte[destLen];
decompress(src, srcOff, decompressed, 0, destLen);
return decompressed;
}
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], int, int) decompress(src, 0, destLen)}.
*/
public final byte[] decompress(byte[] src, int destLen) {
return decompress(src, 0, destLen);
}
/**
* Decompress <code>src</code> into <code>dest</code>. <code>dest</code>'s
* {@link ByteBuffer#remaining()} must be exactly the size of the decompressed
* data. This method moves the positions of the buffers.
*/
public final void decompress(ByteBuffer src, ByteBuffer dest) {
final int read = decompress(src, src.position(), dest, dest.position(), dest.remaining());
dest.position(dest.limit());
src.position(src.position() + read);
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}

View File

@@ -1,91 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
import static net.jpountz.lz4.LZ4Constants.DEFAULT_COMPRESSION_LEVEL;
/**
* High compression {@link LZ4Compressor}s implemented with JNI bindings to the
* original C implementation of LZ4.
*/
final class LZ4HCJNICompressor extends LZ4Compressor {
public static final LZ4HCJNICompressor INSTANCE = new LZ4HCJNICompressor();
private static LZ4Compressor SAFE_INSTANCE;
private final int compressionLevel;
LZ4HCJNICompressor() {
this(DEFAULT_COMPRESSION_LEVEL);
}
LZ4HCJNICompressor(int compressionLevel) {
this.compressionLevel = compressionLevel;
}
@Override
public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
SafeUtils.checkRange(src, srcOff, srcLen);
SafeUtils.checkRange(dest, destOff, maxDestLen);
final int result = LZ4JNI.LZ4_compressHC(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen, compressionLevel);
if (result <= 0) {
throw new LZ4Exception();
}
return result;
}
@Override
public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
ByteBufferUtils.checkNotReadOnly(dest);
ByteBufferUtils.checkRange(src, srcOff, srcLen);
ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
byte[] srcArr = null, destArr = null;
ByteBuffer srcBuf = null, destBuf = null;
if (src.hasArray()) {
srcArr = src.array();
srcOff += src.arrayOffset();
} else {
assert src.isDirect();
srcBuf = src;
}
if (dest.hasArray()) {
destArr = dest.array();
destOff += dest.arrayOffset();
} else {
assert dest.isDirect();
destBuf = dest;
}
final int result = LZ4JNI.LZ4_compressHC(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen, compressionLevel);
if (result <= 0) {
throw new LZ4Exception();
}
return result;
} else {
LZ4Compressor safeInstance = SAFE_INSTANCE;
if (safeInstance == null) {
safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().highCompressor(compressionLevel);
}
return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
}
}
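The JNI-backed high-compression codec above is package-private; it is normally reached through LZ4Factory, which decides between the native bindings and the pure-Java port. A short sketch of that entry point (compression level 9 is chosen only for illustration):

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

public class HighCompressorExample {
    public static void main(String[] args) {
        // fastestInstance() picks the JNI bindings when the native library loads, else the Java port.
        LZ4Factory factory = LZ4Factory.fastestInstance();
        LZ4Compressor hc = factory.highCompressor(9); // higher level = smaller output, slower compression

        byte[] input = new byte[4096];                 // placeholder payload
        byte[] output = new byte[hc.maxCompressedLength(input.length)];
        int written = hc.compress(input, 0, input.length, output, 0, output.length);
        System.out.println("compressed " + input.length + " -> " + written + " bytes");
    }
}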

Datei anzeigen

@ -1,560 +0,0 @@
// Auto-generated: DO NOT EDIT
package net.jpountz.lz4;
import net.jpountz.lz4.LZ4Utils.Match;
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
import java.util.Arrays;
import static net.jpountz.lz4.LZ4Constants.DEFAULT_COMPRESSION_LEVEL;
import static net.jpountz.lz4.LZ4Constants.HASH_TABLE_SIZE_HC;
import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
import static net.jpountz.lz4.LZ4Constants.MAX_DISTANCE;
import static net.jpountz.lz4.LZ4Constants.MF_LIMIT;
import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.OPTIMAL_ML;
import static net.jpountz.lz4.LZ4Utils.copyTo;
import static net.jpountz.lz4.LZ4Utils.hashHC;
/**
* High compression compressor.
*/
final class LZ4HCJavaSafeCompressor extends LZ4Compressor {
public static final LZ4Compressor INSTANCE = new LZ4HCJavaSafeCompressor();
private final int maxAttempts;
final int compressionLevel;
LZ4HCJavaSafeCompressor() {
this(DEFAULT_COMPRESSION_LEVEL);
}
LZ4HCJavaSafeCompressor(int compressionLevel) {
this.maxAttempts = 1 << (compressionLevel - 1);
this.compressionLevel = compressionLevel;
}
private class HashTable {
static final int MASK = MAX_DISTANCE - 1;
int nextToUpdate;
private final int base;
private final int[] hashTable;
private final short[] chainTable;
HashTable(int base) {
this.base = base;
nextToUpdate = base;
hashTable = new int[HASH_TABLE_SIZE_HC];
Arrays.fill(hashTable, -1);
chainTable = new short[MAX_DISTANCE];
}
private int hashPointer(byte[] bytes, int off) {
final int v = SafeUtils.readInt(bytes, off);
return hashPointer(v);
}
private int hashPointer(ByteBuffer bytes, int off) {
final int v = ByteBufferUtils.readInt(bytes, off);
return hashPointer(v);
}
private int hashPointer(int v) {
final int h = hashHC(v);
return hashTable[h];
}
private int next(int off) {
return off - (chainTable[off & MASK] & 0xFFFF);
}
private void addHash(byte[] bytes, int off) {
final int v = SafeUtils.readInt(bytes, off);
addHash(v, off);
}
private void addHash(ByteBuffer bytes, int off) {
final int v = ByteBufferUtils.readInt(bytes, off);
addHash(v, off);
}
private void addHash(int v, int off) {
final int h = hashHC(v);
int delta = off - hashTable[h];
assert delta > 0 : delta;
if (delta >= MAX_DISTANCE) {
delta = MAX_DISTANCE - 1;
}
chainTable[off & MASK] = (short) delta;
hashTable[h] = off;
}
void insert(int off, byte[] bytes) {
for (; nextToUpdate < off; ++nextToUpdate) {
addHash(bytes, nextToUpdate);
}
}
void insert(int off, ByteBuffer bytes) {
for (; nextToUpdate < off; ++nextToUpdate) {
addHash(bytes, nextToUpdate);
}
}
boolean insertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match) {
match.start = off;
match.len = 0;
int delta = 0;
int repl = 0;
insert(off, buf);
int ref = hashPointer(buf, off);
if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
if (LZ4SafeUtils.readIntEquals(buf, ref, off)) { // confirmed
delta = off - ref;
repl = match.len = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
match.ref = ref;
}
ref = next(ref);
}
for (int i = 0; i < maxAttempts; ++i) {
if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
break;
}
if (LZ4SafeUtils.readIntEquals(buf, ref, off)) {
final int matchLen = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
if (matchLen > match.len) {
match.ref = ref;
match.len = matchLen;
}
}
ref = next(ref);
}
if (repl != 0) {
int ptr = off;
final int end = off + repl - (MIN_MATCH - 1);
while (ptr < end - delta) {
chainTable[ptr & MASK] = (short) delta; // preload
++ptr;
}
do {
chainTable[ptr & MASK] = (short) delta;
hashTable[hashHC(SafeUtils.readInt(buf, ptr))] = ptr;
++ptr;
} while (ptr < end);
nextToUpdate = end;
}
return match.len != 0;
}
boolean insertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
match.len = minLen;
insert(off, buf);
final int delta = off - startLimit;
int ref = hashPointer(buf, off);
for (int i = 0; i < maxAttempts; ++i) {
if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
break;
}
if (LZ4SafeUtils.readIntEquals(buf, ref, off)) {
final int matchLenForward = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
final int matchLenBackward = LZ4SafeUtils.commonBytesBackward(buf, ref, off, base, startLimit);
final int matchLen = matchLenBackward + matchLenForward;
if (matchLen > match.len) {
match.len = matchLen;
match.ref = ref - matchLenBackward;
match.start = off - matchLenBackward;
}
}
ref = next(ref);
}
return match.len > minLen;
}
boolean insertAndFindBestMatch(ByteBuffer buf, int off, int matchLimit, Match match) {
match.start = off;
match.len = 0;
int delta = 0;
int repl = 0;
insert(off, buf);
int ref = hashPointer(buf, off);
if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { // confirmed
delta = off - ref;
repl = match.len = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
match.ref = ref;
}
ref = next(ref);
}
for (int i = 0; i < maxAttempts; ++i) {
if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
break;
}
if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
final int matchLen = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
if (matchLen > match.len) {
match.ref = ref;
match.len = matchLen;
}
}
ref = next(ref);
}
if (repl != 0) {
int ptr = off;
final int end = off + repl - (MIN_MATCH - 1);
while (ptr < end - delta) {
chainTable[ptr & MASK] = (short) delta; // preload
++ptr;
}
do {
chainTable[ptr & MASK] = (short) delta;
hashTable[hashHC(ByteBufferUtils.readInt(buf, ptr))] = ptr;
++ptr;
} while (ptr < end);
nextToUpdate = end;
}
return match.len != 0;
}
boolean insertAndFindWiderMatch(ByteBuffer buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
match.len = minLen;
insert(off, buf);
final int delta = off - startLimit;
int ref = hashPointer(buf, off);
for (int i = 0; i < maxAttempts; ++i) {
if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
break;
}
if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
final int matchLenForward = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
final int matchLenBackward = LZ4ByteBufferUtils.commonBytesBackward(buf, ref, off, base, startLimit);
final int matchLen = matchLenBackward + matchLenForward;
if (matchLen > match.len) {
match.len = matchLen;
match.ref = ref - matchLenBackward;
match.start = off - matchLenBackward;
}
}
ref = next(ref);
}
return match.len > minLen;
}
}
@Override
public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
SafeUtils.checkRange(src, srcOff, srcLen);
SafeUtils.checkRange(dest, destOff, maxDestLen);
final int srcEnd = srcOff + srcLen;
final int destEnd = destOff + maxDestLen;
final int mfLimit = srcEnd - MF_LIMIT;
final int matchLimit = srcEnd - LAST_LITERALS;
int sOff = srcOff;
int dOff = destOff;
int anchor = sOff++;
final HashTable ht = new HashTable(srcOff);
final Match match0 = new Match();
final Match match1 = new Match();
final Match match2 = new Match();
final Match match3 = new Match();
main:
while (sOff < mfLimit) {
if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
++sOff;
continue;
}
// save match1 in case we skip too far ahead
copyTo(match1, match0);
search2:
while (true) {
assert match1.start >= anchor;
if (match1.end() >= mfLimit
|| !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
// no better match
dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
continue main;
}
if (match0.start < match1.start) {
if (match2.start < match1.start + match0.len) { // empirical
copyTo(match0, match1);
}
}
assert match2.start > match1.start;
if (match2.start - match1.start < 3) { // first match too small: drop it and continue with the wider one
copyTo(match2, match1);
continue search2;
}
search3:
while (true) {
if (match2.start - match1.start < OPTIMAL_ML) {
int newMatchLen = match1.len;
if (newMatchLen > OPTIMAL_ML) {
newMatchLen = OPTIMAL_ML;
}
if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
}
final int correction = newMatchLen - (match2.start - match1.start);
if (correction > 0) {
match2.fix(correction);
}
}
if (match2.start + match2.len >= mfLimit
|| !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
// no better match -> 2 sequences to encode
if (match2.start < match1.end()) {
match1.len = match2.start - match1.start;
}
// encode seq 1
dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
// encode seq 2
dOff = LZ4SafeUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
anchor = sOff = match2.end();
continue main;
}
if (match3.start < match1.end() + 3) { // not enough space for match 2: remove it
if (match3.start >= match1.end()) { // can write Seq1 immediately => Seq2 is removed, so Seq3 becomes Seq1
if (match2.start < match1.end()) {
final int correction = match1.end() - match2.start;
match2.fix(correction);
if (match2.len < MIN_MATCH) {
copyTo(match3, match2);
}
}
dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
copyTo(match3, match1);
copyTo(match2, match0);
continue search2;
}
copyTo(match3, match2);
continue search3;
}
// OK, now we have 3 ascending matches; let's write at least the first one
if (match2.start < match1.end()) {
if (match2.start - match1.start < ML_MASK) {
if (match1.len > OPTIMAL_ML) {
match1.len = OPTIMAL_ML;
}
if (match1.end() > match2.end() - MIN_MATCH) {
match1.len = match2.end() - match1.start - MIN_MATCH;
}
final int correction = match1.end() - match2.start;
match2.fix(correction);
} else {
match1.len = match2.start - match1.start;
}
}
dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
copyTo(match2, match1);
copyTo(match3, match2);
continue search3;
}
}
}
dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
@Override
public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
if (src.hasArray() && dest.hasArray()) {
return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
}
src = ByteBufferUtils.inNativeByteOrder(src);
dest = ByteBufferUtils.inNativeByteOrder(dest);
ByteBufferUtils.checkRange(src, srcOff, srcLen);
ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
final int srcEnd = srcOff + srcLen;
final int destEnd = destOff + maxDestLen;
final int mfLimit = srcEnd - MF_LIMIT;
final int matchLimit = srcEnd - LAST_LITERALS;
int sOff = srcOff;
int dOff = destOff;
int anchor = sOff++;
final HashTable ht = new HashTable(srcOff);
final Match match0 = new Match();
final Match match1 = new Match();
final Match match2 = new Match();
final Match match3 = new Match();
main:
while (sOff < mfLimit) {
if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
++sOff;
continue;
}
// save match1 in case we skip too far ahead
copyTo(match1, match0);
search2:
while (true) {
assert match1.start >= anchor;
if (match1.end() >= mfLimit
|| !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
// no better match
dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
continue main;
}
if (match0.start < match1.start) {
if (match2.start < match1.start + match0.len) { // empirical
copyTo(match0, match1);
}
}
assert match2.start > match1.start;
if (match2.start - match1.start < 3) { // first match too small: drop it and continue with the wider one
copyTo(match2, match1);
continue search2;
}
search3:
while (true) {
if (match2.start - match1.start < OPTIMAL_ML) {
int newMatchLen = match1.len;
if (newMatchLen > OPTIMAL_ML) {
newMatchLen = OPTIMAL_ML;
}
if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
}
final int correction = newMatchLen - (match2.start - match1.start);
if (correction > 0) {
match2.fix(correction);
}
}
if (match2.start + match2.len >= mfLimit
|| !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
// no better match -> 2 sequences to encode
if (match2.start < match1.end()) {
match1.len = match2.start - match1.start;
}
// encode seq 1
dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
// encode seq 2
dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
anchor = sOff = match2.end();
continue main;
}
if (match3.start < match1.end() + 3) { // not enough space for match 2: remove it
if (match3.start >= match1.end()) { // can write Seq1 immediately => Seq2 is removed, so Seq3 becomes Seq1
if (match2.start < match1.end()) {
final int correction = match1.end() - match2.start;
match2.fix(correction);
if (match2.len < MIN_MATCH) {
copyTo(match3, match2);
}
}
dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
copyTo(match3, match1);
copyTo(match2, match0);
continue search2;
}
copyTo(match3, match2);
continue search3;
}
// OK, now we have 3 ascending matches; let's write at least the first one
if (match2.start < match1.end()) {
if (match2.start - match1.start < ML_MASK) {
if (match1.len > OPTIMAL_ML) {
match1.len = OPTIMAL_ML;
}
if (match1.end() > match2.end() - MIN_MATCH) {
match1.len = match2.end() - match1.start - MIN_MATCH;
}
final int correction = match1.end() - match2.start;
match2.fix(correction);
} else {
match1.len = match2.start - match1.start;
}
}
dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
anchor = sOff = match1.end();
copyTo(match2, match1);
copyTo(match3, match2);
continue search3;
}
}
}
dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
}

Datei anzeigen

@ -1,138 +0,0 @@
package net.jpountz.lz4;
import java.io.IOException;
import java.io.InputStream;
public class LZ4InputStream extends InputStream {
private static LZ4Factory factory = LZ4Factory.fastestInstance();
private final InputStream inputStream;
private final LZ4Decompressor decompressor;
private byte[] compressedBuffer;
private byte[] decompressedBuffer;
private int decompressedBufferPosition = 0;
private int decompressedBufferLength = 0;
public LZ4InputStream(InputStream stream) {
this(stream, 1048576);
}
public LZ4InputStream(InputStream stream, int size) {
this.decompressor = factory.decompressor();
this.inputStream = stream;
compressedBuffer = new byte[size];
decompressedBuffer = new byte[size];
}
@Override
public void close() throws IOException {
inputStream.close();
}
@Override
public int read() throws IOException {
if (ensureBytesAvailableInDecompressedBuffer())
return decompressedBuffer[decompressedBufferPosition++] & 0xFF;
return -1;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (!ensureBytesAvailableInDecompressedBuffer())
return -1;
// The caller asks for up to len bytes starting at b[off]; track how many are still outstanding.
int numBytesRemainingToRead = len;
while (numBytesRemainingToRead > 0 && ensureBytesAvailableInDecompressedBuffer()) {
int numBytesToRead = numBytesRemainingToRead;
int numBytesRemainingInBlock = decompressedBufferLength - decompressedBufferPosition;
if (numBytesToRead > numBytesRemainingInBlock) {
numBytesToRead = numBytesRemainingInBlock;
}
System.arraycopy(decompressedBuffer, decompressedBufferPosition, b, off, numBytesToRead);
decompressedBufferPosition += numBytesToRead;
off += numBytesToRead;
numBytesRemainingToRead -= numBytesToRead;
}
return len - numBytesRemainingToRead;
}
@Override
public long skip(long n) throws IOException {
long numBytesRemainingToSkip = n;
while (numBytesRemainingToSkip > 0 && ensureBytesAvailableInDecompressedBuffer()) {
long numBytesToSkip = numBytesRemainingToSkip;
int numBytesRemainingInBlock = decompressedBufferLength - decompressedBufferPosition;
if (numBytesToSkip > numBytesRemainingInBlock) {
numBytesToSkip = numBytesRemainingInBlock;
}
numBytesRemainingToSkip -= numBytesToSkip;
decompressedBufferPosition += numBytesToSkip;
}
return n - numBytesRemainingToSkip;
}
private boolean ensureBytesAvailableInDecompressedBuffer() throws IOException {
while (decompressedBufferPosition >= decompressedBufferLength) {
if (!fillBuffer()) {
return false;
}
}
return true;
}
private boolean fillBuffer() throws IOException {
decompressedBufferLength = LZ4StreamHelper.readLength(inputStream);
int compressedBufferLength = LZ4StreamHelper.readLength(inputStream);
if (blockHeadersIndicateNoMoreData(compressedBufferLength, decompressedBufferLength)) {
return false;
}
ensureBufferCapacity(compressedBufferLength, decompressedBufferLength);
if (fillCompressedBuffer(compressedBufferLength)) {
decompressor.decompress(compressedBuffer, 0, decompressedBuffer, 0, decompressedBufferLength);
decompressedBufferPosition = 0;
return true;
}
return false;
}
private boolean blockHeadersIndicateNoMoreData(int compressedBufferLength, int decompressedBufferLength) {
return compressedBufferLength < 0 || decompressedBufferLength < 0;
}
private boolean fillCompressedBuffer(int compressedBufferLength) throws IOException {
int bytesRead = 0;
while (bytesRead < compressedBufferLength) {
int bytesReadInAttempt = inputStream.read(compressedBuffer, bytesRead, compressedBufferLength - bytesRead);
if (bytesReadInAttempt < 0)
return false;
bytesRead += bytesReadInAttempt;
}
return true;
}
private void ensureBufferCapacity(int compressedBufferLength, int decompressedBufferLength) {
if (compressedBufferLength > compressedBuffer.length) {
compressedBuffer = new byte[compressedBufferLength];
}
if (decompressedBufferLength > decompressedBuffer.length) {
decompressedBuffer = new byte[decompressedBufferLength];
}
}
}

Datei anzeigen

@ -1,46 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.Native;
import java.nio.ByteBuffer;
/**
* JNI bindings to the original C implementation of LZ4.
*/
enum LZ4JNI {
;
static {
Native.load();
init();
}
static native void init();
static native int LZ4_compress_limitedOutput(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen);
static native int LZ4_compressHC(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen, int compressionLevel);
static native int LZ4_decompress_fast(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, byte[] destArray, ByteBuffer destBuffer, int destOff, int destLen);
static native int LZ4_decompress_safe(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen);
static native int LZ4_compressBound(int len);
}

Datei anzeigen

@ -1,80 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteBuffer;
import static net.jpountz.util.ByteBufferUtils.checkNotReadOnly;
import static net.jpountz.util.ByteBufferUtils.checkRange;
import static net.jpountz.util.SafeUtils.checkRange;
/**
* Fast {@link LZ4Compressor} implemented with JNI bindings to the original C
* implementation of LZ4.
*/
final class LZ4JNICompressor extends LZ4Compressor {
public static final LZ4Compressor INSTANCE = new LZ4JNICompressor();
private static LZ4Compressor SAFE_INSTANCE;
@Override
public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
checkRange(src, srcOff, srcLen);
checkRange(dest, destOff, maxDestLen);
final int result = LZ4JNI.LZ4_compress_limitedOutput(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen);
if (result <= 0) {
throw new LZ4Exception("maxDestLen is too small");
}
return result;
}
@Override
public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
checkNotReadOnly(dest);
checkRange(src, srcOff, srcLen);
checkRange(dest, destOff, maxDestLen);
if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
byte[] srcArr = null, destArr = null;
ByteBuffer srcBuf = null, destBuf = null;
if (src.hasArray()) {
srcArr = src.array();
srcOff += src.arrayOffset();
} else {
assert src.isDirect();
srcBuf = src;
}
if (dest.hasArray()) {
destArr = dest.array();
destOff += dest.arrayOffset();
} else {
assert dest.isDirect();
destBuf = dest;
}
final int result = LZ4JNI.LZ4_compress_limitedOutput(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen);
if (result <= 0) {
throw new LZ4Exception("maxDestLen is too small");
}
return result;
} else {
LZ4Compressor safeInstance = SAFE_INSTANCE;
if (safeInstance == null) {
safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastCompressor();
}
return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
}
}
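As the branch above shows, the JNI compressor only hands buffers to native code when they are array-backed or direct; anything else falls back to the pure-Java implementation. A sketch of the direct-buffer path, assuming the buffer-to-buffer convenience overload of the public LZ4Compressor API (sizes are illustrative):

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

import java.nio.ByteBuffer;

public class DirectBufferCompressExample {
    public static void main(String[] args) {
        LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();

        // Direct buffers take the native path above; heap buffers take the array path.
        ByteBuffer src = ByteBuffer.allocateDirect(8192);
        src.put(new byte[8192]);
        src.flip();
        ByteBuffer dest = ByteBuffer.allocateDirect(compressor.maxCompressedLength(src.remaining()));

        compressor.compress(src, dest); // moves both positions by the bytes consumed/produced
        dest.flip();
        System.out.println("compressed block occupies " + dest.remaining() + " bytes");
    }
}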

Datei anzeigen

@ -1,82 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
/**
* {@link LZ4FastDecompressor} implemented with JNI bindings to the original C
* implementation of LZ4.
*/
final class LZ4JNIFastDecompressor extends LZ4FastDecompressor {
public static final LZ4JNIFastDecompressor INSTANCE = new LZ4JNIFastDecompressor();
private static LZ4FastDecompressor SAFE_INSTANCE;
@Override
public final int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
SafeUtils.checkRange(src, srcOff);
SafeUtils.checkRange(dest, destOff, destLen);
final int result = LZ4JNI.LZ4_decompress_fast(src, null, srcOff, dest, null, destOff, destLen);
if (result < 0) {
throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
}
return result;
}
@Override
public int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen) {
ByteBufferUtils.checkNotReadOnly(dest);
ByteBufferUtils.checkRange(src, srcOff);
ByteBufferUtils.checkRange(dest, destOff, destLen);
if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
byte[] srcArr = null, destArr = null;
ByteBuffer srcBuf = null, destBuf = null;
if (src.hasArray()) {
srcArr = src.array();
srcOff += src.arrayOffset();
} else {
assert src.isDirect();
srcBuf = src;
}
if (dest.hasArray()) {
destArr = dest.array();
destOff += dest.arrayOffset();
} else {
assert dest.isDirect();
destBuf = dest;
}
final int result = LZ4JNI.LZ4_decompress_fast(srcArr, srcBuf, srcOff, destArr, destBuf, destOff, destLen);
if (result < 0) {
throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
}
return result;
} else {
LZ4FastDecompressor safeInstance = SAFE_INSTANCE;
if (safeInstance == null) {
safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastDecompressor();
}
return safeInstance.decompress(src, srcOff, dest, destOff, destLen);
}
}
}

Datei anzeigen

@ -1,81 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
/**
* {@link LZ4SafeDecompressor} implemented with JNI bindings to the original C
* implementation of LZ4.
*/
final class LZ4JNISafeDecompressor extends LZ4SafeDecompressor {
public static final LZ4JNISafeDecompressor INSTANCE = new LZ4JNISafeDecompressor();
private static LZ4SafeDecompressor SAFE_INSTANCE;
@Override
public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
SafeUtils.checkRange(src, srcOff, srcLen);
SafeUtils.checkRange(dest, destOff, maxDestLen);
final int result = LZ4JNI.LZ4_decompress_safe(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen);
if (result < 0) {
throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
}
return result;
}
@Override
public int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
ByteBufferUtils.checkNotReadOnly(dest);
ByteBufferUtils.checkRange(src, srcOff, srcLen);
ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
byte[] srcArr = null, destArr = null;
ByteBuffer srcBuf = null, destBuf = null;
if (src.hasArray()) {
srcArr = src.array();
srcOff += src.arrayOffset();
} else {
assert src.isDirect();
srcBuf = src;
}
if (dest.hasArray()) {
destArr = dest.array();
destOff += dest.arrayOffset();
} else {
assert dest.isDirect();
destBuf = dest;
}
final int result = LZ4JNI.LZ4_decompress_safe(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen);
if (result < 0) {
throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
}
return result;
} else {
LZ4SafeDecompressor safeInstance = SAFE_INSTANCE;
if (safeInstance == null) {
safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().safeDecompressor();
}
return safeInstance.decompress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
}
}

Datei anzeigen

@ -1,523 +0,0 @@
// Auto-generated: DO NOT EDIT
package net.jpountz.lz4;
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
import java.util.Arrays;
import static net.jpountz.lz4.LZ4Constants.HASH_TABLE_SIZE;
import static net.jpountz.lz4.LZ4Constants.HASH_TABLE_SIZE_64K;
import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
import static net.jpountz.lz4.LZ4Constants.LZ4_64K_LIMIT;
import static net.jpountz.lz4.LZ4Constants.MAX_DISTANCE;
import static net.jpountz.lz4.LZ4Constants.MF_LIMIT;
import static net.jpountz.lz4.LZ4Constants.MIN_LENGTH;
import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
import static net.jpountz.lz4.LZ4Constants.ML_BITS;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
import static net.jpountz.lz4.LZ4Constants.SKIP_STRENGTH;
import static net.jpountz.lz4.LZ4Utils.hash;
import static net.jpountz.lz4.LZ4Utils.hash64k;
/**
* Compressor.
*/
final class LZ4JavaSafeCompressor extends LZ4Compressor {
public static final LZ4Compressor INSTANCE = new LZ4JavaSafeCompressor();
static int compress64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int destEnd) {
final int srcEnd = srcOff + srcLen;
final int srcLimit = srcEnd - LAST_LITERALS;
final int mflimit = srcEnd - MF_LIMIT;
int sOff = srcOff, dOff = destOff;
int anchor = sOff;
if (srcLen >= MIN_LENGTH) {
final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
++sOff;
main:
while (true) {
// find a match
int forwardOff = sOff;
int ref;
int step = 1;
int searchMatchNb = 1 << SKIP_STRENGTH;
do {
sOff = forwardOff;
forwardOff += step;
step = searchMatchNb++ >>> SKIP_STRENGTH;
if (forwardOff > mflimit) {
break main;
}
final int h = hash64k(SafeUtils.readInt(src, sOff));
ref = srcOff + SafeUtils.readShort(hashTable, h);
SafeUtils.writeShort(hashTable, h, sOff - srcOff);
} while (!LZ4SafeUtils.readIntEquals(src, ref, sOff));
// catch up
final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
sOff -= excess;
ref -= excess;
// current sequence equals the reference sequence
final int runLen = sOff - anchor;
// encode literal length
int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (runLen >= RUN_MASK) {
SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
} else {
SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
}
// copy literals
LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
while (true) {
// encode offset
SafeUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
dOff += 2;
// count the number of matching bytes
sOff += MIN_MATCH;
ref += MIN_MATCH;
final int matchLen = LZ4SafeUtils.commonBytes(src, ref, sOff, srcLimit);
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
sOff += matchLen;
// encode match len
if (matchLen >= ML_MASK) {
SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK);
dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
} else {
SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen);
}
// test end of chunk
if (sOff > mflimit) {
anchor = sOff;
break main;
}
// fill table
SafeUtils.writeShort(hashTable, hash64k(SafeUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
// test next position
final int h = hash64k(SafeUtils.readInt(src, sOff));
ref = srcOff + SafeUtils.readShort(hashTable, h);
SafeUtils.writeShort(hashTable, h, sOff - srcOff);
if (!LZ4SafeUtils.readIntEquals(src, sOff, ref)) {
break;
}
tokenOff = dOff++;
SafeUtils.writeByte(dest, tokenOff, 0);
}
// prepare next loop
anchor = sOff++;
}
}
dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
@Override
public int compress(byte[] src, final int srcOff, int srcLen, byte[] dest, final int destOff, int maxDestLen) {
SafeUtils.checkRange(src, srcOff, srcLen);
SafeUtils.checkRange(dest, destOff, maxDestLen);
final int destEnd = destOff + maxDestLen;
if (srcLen < LZ4_64K_LIMIT) {
return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
}
final int srcEnd = srcOff + srcLen;
final int srcLimit = srcEnd - LAST_LITERALS;
final int mflimit = srcEnd - MF_LIMIT;
int sOff = srcOff, dOff = destOff;
int anchor = sOff++;
final int[] hashTable = new int[HASH_TABLE_SIZE];
Arrays.fill(hashTable, anchor);
main:
while (true) {
// find a match
int forwardOff = sOff;
int ref;
int step = 1;
int searchMatchNb = 1 << SKIP_STRENGTH;
int back;
do {
sOff = forwardOff;
forwardOff += step;
step = searchMatchNb++ >>> SKIP_STRENGTH;
if (forwardOff > mflimit) {
break main;
}
final int h = hash(SafeUtils.readInt(src, sOff));
ref = SafeUtils.readInt(hashTable, h);
back = sOff - ref;
SafeUtils.writeInt(hashTable, h, sOff);
} while (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff));
final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
sOff -= excess;
ref -= excess;
// current sequence equals the reference sequence
final int runLen = sOff - anchor;
// encode literal length
int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (runLen >= RUN_MASK) {
SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
} else {
SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
}
// copy literals
LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
while (true) {
// encode offset
SafeUtils.writeShortLE(dest, dOff, back);
dOff += 2;
// count the number of matching bytes
sOff += MIN_MATCH;
final int matchLen = LZ4SafeUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
sOff += matchLen;
// encode match len
if (matchLen >= ML_MASK) {
SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK);
dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
} else {
SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen);
}
// test end of chunk
if (sOff > mflimit) {
anchor = sOff;
break main;
}
// fill table
SafeUtils.writeInt(hashTable, hash(SafeUtils.readInt(src, sOff - 2)), sOff - 2);
// test next position
final int h = hash(SafeUtils.readInt(src, sOff));
ref = SafeUtils.readInt(hashTable, h);
SafeUtils.writeInt(hashTable, h, sOff);
back = sOff - ref;
if (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff)) {
break;
}
tokenOff = dOff++;
SafeUtils.writeByte(dest, tokenOff, 0);
}
// prepare next loop
anchor = sOff++;
}
dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
static int compress64k(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int destEnd) {
final int srcEnd = srcOff + srcLen;
final int srcLimit = srcEnd - LAST_LITERALS;
final int mflimit = srcEnd - MF_LIMIT;
int sOff = srcOff, dOff = destOff;
int anchor = sOff;
if (srcLen >= MIN_LENGTH) {
final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
++sOff;
main:
while (true) {
// find a match
int forwardOff = sOff;
int ref;
int step = 1;
int searchMatchNb = 1 << SKIP_STRENGTH;
do {
sOff = forwardOff;
forwardOff += step;
step = searchMatchNb++ >>> SKIP_STRENGTH;
if (forwardOff > mflimit) {
break main;
}
final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
ref = srcOff + SafeUtils.readShort(hashTable, h);
SafeUtils.writeShort(hashTable, h, sOff - srcOff);
} while (!LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
// catch up
final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
sOff -= excess;
ref -= excess;
// current sequence equals the reference sequence
final int runLen = sOff - anchor;
// encode literal length
int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (runLen >= RUN_MASK) {
ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
} else {
ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
}
// copy literals
LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
while (true) {
// encode offset
ByteBufferUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
dOff += 2;
// count the number of matching bytes
sOff += MIN_MATCH;
ref += MIN_MATCH;
final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref, sOff, srcLimit);
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
sOff += matchLen;
// encode match len
if (matchLen >= ML_MASK) {
ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
} else {
ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
}
// test end of chunk
if (sOff > mflimit) {
anchor = sOff;
break main;
}
// fill table
SafeUtils.writeShort(hashTable, hash64k(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
// test next position
final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
ref = srcOff + SafeUtils.readShort(hashTable, h);
SafeUtils.writeShort(hashTable, h, sOff - srcOff);
if (!LZ4ByteBufferUtils.readIntEquals(src, sOff, ref)) {
break;
}
tokenOff = dOff++;
ByteBufferUtils.writeByte(dest, tokenOff, 0);
}
// prepare next loop
anchor = sOff++;
}
}
dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
@Override
public int compress(ByteBuffer src, final int srcOff, int srcLen, ByteBuffer dest, final int destOff, int maxDestLen) {
if (src.hasArray() && dest.hasArray()) {
return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
}
src = ByteBufferUtils.inNativeByteOrder(src);
dest = ByteBufferUtils.inNativeByteOrder(dest);
ByteBufferUtils.checkRange(src, srcOff, srcLen);
ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
final int destEnd = destOff + maxDestLen;
if (srcLen < LZ4_64K_LIMIT) {
return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
}
final int srcEnd = srcOff + srcLen;
final int srcLimit = srcEnd - LAST_LITERALS;
final int mflimit = srcEnd - MF_LIMIT;
int sOff = srcOff, dOff = destOff;
int anchor = sOff++;
final int[] hashTable = new int[HASH_TABLE_SIZE];
Arrays.fill(hashTable, anchor);
main:
while (true) {
// find a match
int forwardOff = sOff;
int ref;
int step = 1;
int searchMatchNb = 1 << SKIP_STRENGTH;
int back;
do {
sOff = forwardOff;
forwardOff += step;
step = searchMatchNb++ >>> SKIP_STRENGTH;
if (forwardOff > mflimit) {
break main;
}
final int h = hash(ByteBufferUtils.readInt(src, sOff));
ref = SafeUtils.readInt(hashTable, h);
back = sOff - ref;
SafeUtils.writeInt(hashTable, h, sOff);
} while (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
sOff -= excess;
ref -= excess;
// current sequence equals the reference sequence
final int runLen = sOff - anchor;
// encode literal length
int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (runLen >= RUN_MASK) {
ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
} else {
ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
}
// copy literals
LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
while (true) {
// encode offset
ByteBufferUtils.writeShortLE(dest, dOff, back);
dOff += 2;
// count the number of matching bytes
sOff += MIN_MATCH;
final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
sOff += matchLen;
// encode match len
if (matchLen >= ML_MASK) {
ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
} else {
ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
}
// test end of chunk
if (sOff > mflimit) {
anchor = sOff;
break main;
}
// fill table
SafeUtils.writeInt(hashTable, hash(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2);
// test next position
final int h = hash(ByteBufferUtils.readInt(src, sOff));
ref = SafeUtils.readInt(hashTable, h);
SafeUtils.writeInt(hashTable, h, sOff);
back = sOff - ref;
if (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)) {
break;
}
tokenOff = dOff++;
ByteBufferUtils.writeByte(dest, tokenOff, 0);
}
// prepare next loop
anchor = sOff++;
}
dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
return dOff - destOff;
}
}
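The encoder above packs each sequence behind a single token byte: the upper four bits carry the literal-run length and the lower four bits the match length minus four, with the value 15 in either field meaning extra length bytes follow. A tiny standalone illustration of that packing (constants restated locally to mirror the standard LZ4 block format; this is explanatory code, not part of the library):

public class TokenPackingExample {
    private static final int ML_BITS = 4;                   // bits reserved for the match length
    private static final int ML_MASK = (1 << ML_BITS) - 1;  // 15
    private static final int RUN_MASK = 15;                 // literal-run field saturates at 15
    private static final int MIN_MATCH = 4;                 // shortest match LZ4 will encode

    static int token(int literalLen, int matchLen) {
        int runField = Math.min(literalLen, RUN_MASK) << ML_BITS;
        int matchField = Math.min(matchLen - MIN_MATCH, ML_MASK);
        return runField | matchField; // saturated fields are followed by extra length bytes
    }

    public static void main(String[] args) {
        // 7 literal bytes followed by a 9-byte match -> 0x75
        System.out.printf("token = 0x%02X%n", token(7, 9));
    }
}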

Datei anzeigen

@ -1,209 +0,0 @@
// Auto-generated: DO NOT EDIT
package net.jpountz.lz4;
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
import static net.jpountz.lz4.LZ4Constants.COPY_LENGTH;
import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
import static net.jpountz.lz4.LZ4Constants.ML_BITS;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
/**
* Decompressor.
*/
final class LZ4JavaSafeFastDecompressor extends LZ4FastDecompressor {
public static final LZ4FastDecompressor INSTANCE = new LZ4JavaSafeFastDecompressor();
@Override
public int decompress(byte[] src, final int srcOff, byte[] dest, final int destOff, int destLen) {
SafeUtils.checkRange(src, srcOff);
SafeUtils.checkRange(dest, destOff, destLen);
if (destLen == 0) {
if (SafeUtils.readByte(src, srcOff) != 0) {
throw new LZ4Exception("Malformed input at " + srcOff);
}
return 1;
}
final int destEnd = destOff + destLen;
int sOff = srcOff;
int dOff = destOff;
while (true) {
final int token = SafeUtils.readByte(src, sOff) & 0xFF;
++sOff;
// literals
int literalLen = token >>> ML_BITS;
if (literalLen == RUN_MASK) {
byte len = (byte) 0xFF;
while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
literalLen += 0xFF;
}
literalLen += len & 0xFF;
}
final int literalCopyEnd = dOff + literalLen;
if (literalCopyEnd > destEnd - COPY_LENGTH) {
if (literalCopyEnd != destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
} else {
LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
break; // EOF
}
}
LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
// matches
final int matchDec = SafeUtils.readShortLE(src, sOff);
sOff += 2;
int matchOff = dOff - matchDec;
if (matchOff < destOff) {
throw new LZ4Exception("Malformed input at " + sOff);
}
int matchLen = token & ML_MASK;
if (matchLen == ML_MASK) {
byte len = (byte) 0xFF;
while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
matchLen += 0xFF;
}
matchLen += len & 0xFF;
}
matchLen += MIN_MATCH;
final int matchCopyEnd = dOff + matchLen;
if (matchCopyEnd > destEnd - COPY_LENGTH) {
if (matchCopyEnd > destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
}
LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
} else {
LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
}
dOff = matchCopyEnd;
}
return sOff - srcOff;
}
@Override
public int decompress(ByteBuffer src, final int srcOff, ByteBuffer dest, final int destOff, int destLen) {
if (src.hasArray() && dest.hasArray()) {
return decompress(src.array(), srcOff + src.arrayOffset(), dest.array(), destOff + dest.arrayOffset(), destLen);
}
src = ByteBufferUtils.inNativeByteOrder(src);
dest = ByteBufferUtils.inNativeByteOrder(dest);
ByteBufferUtils.checkRange(src, srcOff);
ByteBufferUtils.checkRange(dest, destOff, destLen);
if (destLen == 0) {
if (ByteBufferUtils.readByte(src, srcOff) != 0) {
throw new LZ4Exception("Malformed input at " + srcOff);
}
return 1;
}
final int destEnd = destOff + destLen;
int sOff = srcOff;
int dOff = destOff;
while (true) {
final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
++sOff;
// literals
int literalLen = token >>> ML_BITS;
if (literalLen == RUN_MASK) {
byte len = (byte) 0xFF;
while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
literalLen += 0xFF;
}
literalLen += len & 0xFF;
}
final int literalCopyEnd = dOff + literalLen;
if (literalCopyEnd > destEnd - COPY_LENGTH) {
if (literalCopyEnd != destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
} else {
LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
break; // EOF
}
}
LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
// matches
final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
sOff += 2;
int matchOff = dOff - matchDec;
if (matchOff < destOff) {
throw new LZ4Exception("Malformed input at " + sOff);
}
int matchLen = token & ML_MASK;
if (matchLen == ML_MASK) {
byte len = (byte) 0xFF;
while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
matchLen += 0xFF;
}
matchLen += len & 0xFF;
}
matchLen += MIN_MATCH;
final int matchCopyEnd = dOff + matchLen;
if (matchCopyEnd > destEnd - COPY_LENGTH) {
if (matchCopyEnd > destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
}
LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
} else {
LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
}
dOff = matchCopyEnd;
}
return sOff - srcOff;
}
}

Datei anzeigen

@ -1,217 +0,0 @@
// Auto-generated: DO NOT EDIT
package net.jpountz.lz4;
import net.jpountz.util.ByteBufferUtils;
import net.jpountz.util.SafeUtils;
import java.nio.ByteBuffer;
import static net.jpountz.lz4.LZ4Constants.COPY_LENGTH;
import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
import static net.jpountz.lz4.LZ4Constants.ML_BITS;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
/**
* Decompressor.
*/
final class LZ4JavaSafeSafeDecompressor extends LZ4SafeDecompressor {
public static final LZ4SafeDecompressor INSTANCE = new LZ4JavaSafeSafeDecompressor();
@Override
public int decompress(byte[] src, final int srcOff, final int srcLen, byte[] dest, final int destOff, int destLen) {
SafeUtils.checkRange(src, srcOff, srcLen);
SafeUtils.checkRange(dest, destOff, destLen);
if (destLen == 0) {
if (srcLen != 1 || SafeUtils.readByte(src, srcOff) != 0) {
throw new LZ4Exception("Output buffer too small");
}
return 0;
}
final int srcEnd = srcOff + srcLen;
final int destEnd = destOff + destLen;
int sOff = srcOff;
int dOff = destOff;
while (true) {
final int token = SafeUtils.readByte(src, sOff) & 0xFF;
++sOff;
// literals
int literalLen = token >>> ML_BITS;
if (literalLen == RUN_MASK) {
byte len = (byte) 0xFF;
while (sOff < srcEnd && (len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
literalLen += 0xFF;
}
literalLen += len & 0xFF;
}
final int literalCopyEnd = dOff + literalLen;
if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
if (literalCopyEnd > destEnd) {
throw new LZ4Exception();
} else if (sOff + literalLen != srcEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
} else {
LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
break; // EOF
}
}
LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
// matches
final int matchDec = SafeUtils.readShortLE(src, sOff);
sOff += 2;
int matchOff = dOff - matchDec;
if (matchOff < destOff) {
throw new LZ4Exception("Malformed input at " + sOff);
}
int matchLen = token & ML_MASK;
if (matchLen == ML_MASK) {
byte len = (byte) 0xFF;
while (sOff < srcEnd && (len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
matchLen += 0xFF;
}
matchLen += len & 0xFF;
}
matchLen += MIN_MATCH;
final int matchCopyEnd = dOff + matchLen;
if (matchCopyEnd > destEnd - COPY_LENGTH) {
if (matchCopyEnd > destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
}
LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
} else {
LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
}
dOff = matchCopyEnd;
}
return dOff - destOff;
}
@Override
public int decompress(ByteBuffer src, final int srcOff, final int srcLen, ByteBuffer dest, final int destOff, int destLen) {
if (src.hasArray() && dest.hasArray()) {
return decompress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), destLen);
}
src = ByteBufferUtils.inNativeByteOrder(src);
dest = ByteBufferUtils.inNativeByteOrder(dest);
ByteBufferUtils.checkRange(src, srcOff, srcLen);
ByteBufferUtils.checkRange(dest, destOff, destLen);
if (destLen == 0) {
if (srcLen != 1 || ByteBufferUtils.readByte(src, srcOff) != 0) {
throw new LZ4Exception("Output buffer too small");
}
return 0;
}
final int srcEnd = srcOff + srcLen;
final int destEnd = destOff + destLen;
int sOff = srcOff;
int dOff = destOff;
while (true) {
final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
++sOff;
// literals
int literalLen = token >>> ML_BITS;
if (literalLen == RUN_MASK) {
byte len = (byte) 0xFF;
while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
literalLen += 0xFF;
}
literalLen += len & 0xFF;
}
final int literalCopyEnd = dOff + literalLen;
if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
if (literalCopyEnd > destEnd) {
throw new LZ4Exception();
} else if (sOff + literalLen != srcEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
} else {
LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
break; // EOF
}
}
LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
sOff += literalLen;
dOff = literalCopyEnd;
// matches
final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
sOff += 2;
int matchOff = dOff - matchDec;
if (matchOff < destOff) {
throw new LZ4Exception("Malformed input at " + sOff);
}
int matchLen = token & ML_MASK;
if (matchLen == ML_MASK) {
byte len = (byte) 0xFF;
while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
matchLen += 0xFF;
}
matchLen += len & 0xFF;
}
matchLen += MIN_MATCH;
final int matchCopyEnd = dOff + matchLen;
if (matchCopyEnd > destEnd - COPY_LENGTH) {
if (matchCopyEnd > destEnd) {
throw new LZ4Exception("Malformed input at " + sOff);
}
LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
} else {
LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
}
dOff = matchCopyEnd;
}
return dOff - destOff;
}
}

Datei anzeigen

@ -1,77 +0,0 @@
package net.jpountz.lz4;
import java.io.IOException;
import java.io.OutputStream;
public class LZ4OutputStream extends OutputStream {
private static final LZ4Factory lz4Factory = LZ4Factory.fastestInstance();
private final LZ4Compressor compressor;
private static final int ONE_MEGABYTE = 1048576;
private final byte[] compressionInputBuffer;
private final byte[] compressionOutputBuffer;
private final OutputStream underlyingOutputStream;
private int bytesRemainingInCompressionInputBuffer = 0;
private int currentCompressionInputBufferPosition = 0;
public LZ4OutputStream(OutputStream os) throws IOException {
this(os, ONE_MEGABYTE, lz4Factory.fastCompressor());
}
public LZ4OutputStream(OutputStream os, int size) throws IOException {
this(os, size, lz4Factory.fastCompressor());
}
public LZ4OutputStream(OutputStream underlyingOutputStream, int blocksize, LZ4Compressor compressor) throws IOException {
compressionInputBuffer = new byte[blocksize];
this.compressor = compressor;
this.underlyingOutputStream = underlyingOutputStream;
this.bytesRemainingInCompressionInputBuffer = blocksize;
this.currentCompressionInputBufferPosition = 0;
this.compressionOutputBuffer = new byte[compressor.maxCompressedLength(blocksize)];
}
public void write(byte[] b, int off, int len) throws IOException {
if (len <= bytesRemainingInCompressionInputBuffer) {
System.arraycopy(b, off, compressionInputBuffer, currentCompressionInputBufferPosition, len);
currentCompressionInputBufferPosition += len;
bytesRemainingInCompressionInputBuffer -= len;
} else {
// len > bytesRemainingInCompressionInputBuffer
while (len > 0) {
int bytesToCopy = Math.min(bytesRemainingInCompressionInputBuffer, len);
System.arraycopy(b, off, compressionInputBuffer, currentCompressionInputBufferPosition, bytesToCopy);
currentCompressionInputBufferPosition += bytesToCopy;
bytesRemainingInCompressionInputBuffer -= bytesToCopy;
flush();
len -= bytesToCopy;
off += bytesToCopy;
}
}
}
public void write(int i) throws IOException {
byte b = (byte) i;
if (0 == bytesRemainingInCompressionInputBuffer) {
flush();
}
compressionInputBuffer[currentCompressionInputBufferPosition] = b;
bytesRemainingInCompressionInputBuffer--;
currentCompressionInputBufferPosition++;
}
public void flush() throws IOException {
if (currentCompressionInputBufferPosition > 0) {
LZ4StreamHelper.writeLength(currentCompressionInputBufferPosition, this.underlyingOutputStream);
int bytesCompressed = compressor.compress(compressionInputBuffer, 0, currentCompressionInputBufferPosition, compressionOutputBuffer, 0, compressionOutputBuffer.length);
LZ4StreamHelper.writeLength(bytesCompressed, this.underlyingOutputStream);
underlyingOutputStream.write(compressionOutputBuffer, 0, bytesCompressed);
bytesRemainingInCompressionInputBuffer = compressionInputBuffer.length;
currentCompressionInputBufferPosition = 0;
}
}
public void close() throws IOException {
flush();
underlyingOutputStream.close();
}
}
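The two stream wrappers removed in this commit frame every block as [decompressedLength][compressedLength][compressedBytes]. A round-trip sketch under that assumption, using only the public constructors shown above and assuming the net.jpountz stream classes remain reachable after relocation:

import net.jpountz.lz4.LZ4InputStream;
import net.jpountz.lz4.LZ4OutputStream;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class Lz4StreamRoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] payload = "clipboard / history payload".getBytes(StandardCharsets.UTF_8);

        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (LZ4OutputStream out = new LZ4OutputStream(sink)) {
            out.write(payload); // buffered and written as compressed blocks on flush/close
        }

        byte[] restored = new byte[payload.length];
        try (LZ4InputStream in = new LZ4InputStream(new ByteArrayInputStream(sink.toByteArray()))) {
            int read = in.read(restored, 0, restored.length);
            System.out.println("read " + read + " bytes: " + new String(restored, StandardCharsets.UTF_8));
        }
    }
}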

Datei anzeigen

@ -1,117 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* LZ4 decompressor that requires the size of the compressed data to be known.
* <p>
* Implementations of this class are usually a little slower than those of
* {@link LZ4FastDecompressor} but do not require the size of the original data to
* be known.
*/
public abstract class LZ4SafeDecompressor implements LZ4UnknownSizeDecompressor {
/**
* Decompress <code>src[srcOff:srcLen]</code> into
* <code>dest[destOff:destOff+maxDestLen]</code> and returns the number of
* decompressed bytes written into <code>dest</code>.
*
* @param srcLen the exact size of the compressed stream
* @return the original input size
* @throws LZ4Exception if maxDestLen is too small
*/
public abstract int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
/**
* Decompress <code>src[srcOff:srcLen]</code> into
* <code>dest[destOff:destOff+maxDestLen]</code> and returns the number of
* decompressed bytes written into <code>dest</code>.
*
* @param srcLen the exact size of the compressed stream
* @return the original input size
* @throws LZ4Exception if maxDestLen is too small
*/
public abstract int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen);
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], int, int, byte[], int, int) decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
*/
public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
return decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
}
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], int, int, byte[], int) decompress(src, 0, src.length, dest, 0)}
*/
public final int decompress(byte[] src, byte[] dest) {
return decompress(src, 0, src.length, dest, 0);
}
/**
* Convenience method which returns <code>src[srcOff:srcOff+srcLen]</code>
* decompressed.
* <p><b><span style="color:red">Warning</span></b>: this method has significant
* overhead: it must allocate a buffer to decompress into and then resize that
* buffer to the actual decompressed length.</p>
* <p>Here is how this method is implemented:</p>
* <pre>
* byte[] decompressed = new byte[maxDestLen];
* final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
* if (decompressedLength != decompressed.length) {
* decompressed = Arrays.copyOf(decompressed, decompressedLength);
* }
* return decompressed;
* </pre>
*/
public final byte[] decompress(byte[] src, int srcOff, int srcLen, int maxDestLen) {
byte[] decompressed = new byte[maxDestLen];
final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
if (decompressedLength != decompressed.length) {
decompressed = Arrays.copyOf(decompressed, decompressedLength);
}
return decompressed;
}
/**
* Convenience method, equivalent to calling
* {@link #decompress(byte[], int, int, int) decompress(src, 0, src.length, maxDestLen)}.
*/
public final byte[] decompress(byte[] src, int maxDestLen) {
return decompress(src, 0, src.length, maxDestLen);
}
/**
* Decompress <code>src</code> into <code>dest</code>. <code>src</code>'s
* {@link ByteBuffer#remaining()} must be exactly the size of the compressed
* data. This method moves the positions of the buffers.
*/
public final void decompress(ByteBuffer src, ByteBuffer dest) {
final int decompressed = decompress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining());
src.position(src.limit());
dest.position(dest.position() + decompressed);
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
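A short usage sketch for the class above, built on lz4-java's public factory API (LZ4Factory.fastestInstance(), fastCompressor(), safeDecompressor()); the class name and payload are illustrative only.

import java.nio.charset.StandardCharsets;

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4SafeDecompressor;

public final class SafeDecompressorExample {

    public static void main(String[] args) {
        byte[] original = "example payload for a known-compressed-size round trip".getBytes(StandardCharsets.UTF_8);

        LZ4Factory factory = LZ4Factory.fastestInstance();
        LZ4Compressor compressor = factory.fastCompressor();

        byte[] compressed = new byte[compressor.maxCompressedLength(original.length)];
        int compressedLength = compressor.compress(original, 0, original.length, compressed, 0, compressed.length);

        // The safe decompressor needs the exact compressed length, but only an upper
        // bound on the decompressed size; it returns how many bytes it actually wrote.
        LZ4SafeDecompressor decompressor = factory.safeDecompressor();
        byte[] restored = new byte[original.length];
        int restoredLength = decompressor.decompress(compressed, 0, compressedLength, restored, 0, restored.length);

        System.out.println(restoredLength == original.length);
    }

    private SafeDecompressorExample() {
    }
}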

Datei anzeigen

@ -1,180 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import net.jpountz.util.SafeUtils;
import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
import static net.jpountz.lz4.LZ4Constants.ML_BITS;
import static net.jpountz.lz4.LZ4Constants.ML_MASK;
import static net.jpountz.lz4.LZ4Constants.RUN_MASK;
enum LZ4SafeUtils {
;
static int hash(byte[] buf, int i) {
return LZ4Utils.hash(SafeUtils.readInt(buf, i));
}
static int hash64k(byte[] buf, int i) {
return LZ4Utils.hash64k(SafeUtils.readInt(buf, i));
}
static boolean readIntEquals(byte[] buf, int i, int j) {
return buf[i] == buf[j] && buf[i + 1] == buf[j + 1] && buf[i + 2] == buf[j + 2] && buf[i + 3] == buf[j + 3];
}
static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) {
for (int i = 0; i < matchLen; ++i) {
dest[dOff + i] = dest[matchOff + i];
}
}
static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) {
do {
copy8Bytes(dest, matchOff, dest, dOff);
matchOff += 8;
dOff += 8;
} while (dOff < matchCopyEnd);
}
static void copy8Bytes(byte[] src, int sOff, byte[] dest, int dOff) {
for (int i = 0; i < 8; ++i) {
dest[dOff + i] = src[sOff + i];
}
}
static int commonBytes(byte[] b, int o1, int o2, int limit) {
int count = 0;
while (o2 < limit && b[o1++] == b[o2++]) {
++count;
}
return count;
}
static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) {
int count = 0;
while (o1 > l1 && o2 > l2 && b[--o1] == b[--o2]) {
++count;
}
return count;
}
static void safeArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
System.arraycopy(src, sOff, dest, dOff, len);
}
static void wildArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
try {
for (int i = 0; i < len; i += 8) {
copy8Bytes(src, sOff + i, dest, dOff + i);
}
} catch (ArrayIndexOutOfBoundsException e) {
throw new LZ4Exception("Malformed input at offset " + sOff);
}
}
static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) {
final int runLen = matchOff - anchor;
final int tokenOff = dOff++;
if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
int token;
if (runLen >= RUN_MASK) {
token = (byte) (RUN_MASK << ML_BITS);
dOff = writeLen(runLen - RUN_MASK, dest, dOff);
} else {
token = runLen << ML_BITS;
}
// copy literals
wildArraycopy(src, anchor, dest, dOff, runLen);
dOff += runLen;
// encode offset
final int matchDec = matchOff - matchRef;
dest[dOff++] = (byte) matchDec;
dest[dOff++] = (byte) (matchDec >>> 8);
// encode match len
matchLen -= 4;
if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
throw new LZ4Exception("maxDestLen is too small");
}
if (matchLen >= ML_MASK) {
token |= ML_MASK;
dOff = writeLen(matchLen - RUN_MASK, dest, dOff);
} else {
token |= matchLen;
}
dest[tokenOff] = (byte) token;
return dOff;
}
static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) {
final int runLen = srcLen;
if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) {
throw new LZ4Exception();
}
if (runLen >= RUN_MASK) {
dest[dOff++] = (byte) (RUN_MASK << ML_BITS);
dOff = writeLen(runLen - RUN_MASK, dest, dOff);
} else {
dest[dOff++] = (byte) (runLen << ML_BITS);
}
// copy literals
System.arraycopy(src, sOff, dest, dOff, runLen);
dOff += runLen;
return dOff;
}
static int writeLen(int len, byte[] dest, int dOff) {
while (len >= 0xFF) {
dest[dOff++] = (byte) 0xFF;
len -= 0xFF;
}
dest[dOff++] = (byte) len;
return dOff;
}
static class Match {
int start, ref, len;
void fix(int correction) {
start += correction;
ref += correction;
len -= correction;
}
int end() {
return start + len;
}
}
static void copyTo(Match m1, Match m2) {
m2.len = m1.len;
m2.start = m1.start;
m2.ref = m1.ref;
}
}
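encodeSequence() above packs each sequence behind a single token byte: the high nibble carries the literal run length and the low nibble the match length minus MIN_MATCH (4), with a nibble value of 15 (RUN_MASK / ML_MASK) signalling that extra length bytes follow. An illustrative decode of that token layout (hypothetical class, not part of the library):

public final class TokenSketch {

    // High nibble: literal run length, as written via the ML_BITS shift above.
    public static int literalRunLength(int token) {
        return (token >>> 4) & 0x0F;
    }

    // Low nibble: match length code (actual match length is this plus MIN_MATCH = 4).
    public static int matchLengthCode(int token) {
        return token & 0x0F;
    }

    public static void main(String[] args) {
        int token = (3 << 4) | 7; // 3 literals, match length 7 + 4 = 11
        System.out.println(literalRunLength(token) + " literals, match " + (matchLengthCode(token) + 4));
    }

    private TokenSketch() {
    }
}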

Datei anzeigen

@ -1,35 +0,0 @@
package net.jpountz.lz4;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public class LZ4StreamHelper {
static void writeLength(int length, OutputStream os) throws IOException {
int b1 = ((length & 0xff000000) >> 24);
int b2 = ((length & 0x00ff0000) >> 16);
int b3 = ((length & 0x0000ff00) >> 8);
int b4 = (length & 0x000000ff);
os.write(b1);
os.write(b2);
os.write(b3);
os.write(b4);
}
// network order, big endian, most significant byte first
// package scope
static int readLength(InputStream is) throws IOException {
int b1 = is.read();
int b2 = is.read();
int b3 = is.read();
int b4 = is.read();
int length;
if ((-1 == b1) || (-1 == b2) || (-1 == b3) || (-1 == b4)) {
length = -1;
} else {
length = ((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
}
return length;
}
}
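A minimal round trip of the helpers above, assuming same-package access since both methods are package-private; readLength() returns -1 once the stream is exhausted.

package net.jpountz.lz4;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public final class LengthRoundTripSketch {

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        LZ4StreamHelper.writeLength(123456, out);            // 4 bytes, most significant byte first

        ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
        System.out.println(LZ4StreamHelper.readLength(in));  // 123456
        System.out.println(LZ4StreamHelper.readLength(in));  // -1: stream exhausted
    }

    private LengthRoundTripSketch() {
    }
}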

Datei anzeigen

@ -1,27 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @deprecated Use {@link LZ4SafeDecompressor} instead.
*/
@Deprecated
public interface LZ4UnknownSizeDecompressor {
int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff);
}

Datei anzeigen

@ -1,68 +0,0 @@
package net.jpountz.lz4;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static net.jpountz.lz4.LZ4Constants.HASH_LOG;
import static net.jpountz.lz4.LZ4Constants.HASH_LOG_64K;
import static net.jpountz.lz4.LZ4Constants.HASH_LOG_HC;
import static net.jpountz.lz4.LZ4Constants.MIN_MATCH;
public enum LZ4Utils {
;
private static final int MAX_INPUT_SIZE = 0x7E000000;
public static int maxCompressedLength(int length) {
if (length < 0) {
throw new IllegalArgumentException("length must be >= 0, got " + length);
} else if (length >= MAX_INPUT_SIZE) {
throw new IllegalArgumentException("length must be < " + MAX_INPUT_SIZE);
}
return length + length / 255 + 16;
}
public static int hash(int i) {
return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG);
}
public static int hash64k(int i) {
return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_64K);
}
public static int hashHC(int i) {
return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_HC);
}
public static class Match {
int start, ref, len;
void fix(int correction) {
start += correction;
ref += correction;
len -= correction;
}
int end() {
return start + len;
}
}
public static void copyTo(Match m1, Match m2) {
m2.len = m1.len;
m2.start = m1.start;
m2.ref = m1.ref;
}
}
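maxCompressedLength() above gives the worst-case output size (length + length / 255 + 16). The public LZ4Compressor API exposes the same bound, which is how a destination buffer is typically sized; the class name below is illustrative only.

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

public final class WorstCaseSizingSketch {

    public static void main(String[] args) {
        byte[] input = new byte[64 * 1024];

        LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
        // Size the destination for the worst case, then compress into it.
        byte[] dest = new byte[compressor.maxCompressedLength(input.length)];
        int written = compressor.compress(input, 0, input.length, dest, 0, dest.length);

        System.out.println(input.length + " -> " + written + " (buffer " + dest.length + ")");
    }

    private WorstCaseSizingSketch() {
    }
}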

Datei anzeigen

@ -1,92 +0,0 @@
package net.jpountz.util;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.ReadOnlyBufferException;
public enum ByteBufferUtils {
;
public static void checkRange(ByteBuffer buf, int off, int len) {
SafeUtils.checkLength(len);
if (len > 0) {
checkRange(buf, off);
checkRange(buf, off + len - 1);
}
}
public static void checkRange(ByteBuffer buf, int off) {
if (off < 0 || off >= buf.capacity()) {
throw new ArrayIndexOutOfBoundsException(off);
}
}
public static ByteBuffer inLittleEndianOrder(ByteBuffer buf) {
if (buf.order().equals(ByteOrder.LITTLE_ENDIAN)) {
return buf;
} else {
return buf.duplicate().order(ByteOrder.LITTLE_ENDIAN);
}
}
public static ByteBuffer inNativeByteOrder(ByteBuffer buf) {
if (buf.order().equals(Utils.NATIVE_BYTE_ORDER)) {
return buf;
} else {
return buf.duplicate().order(Utils.NATIVE_BYTE_ORDER);
}
}
public static byte readByte(ByteBuffer buf, int i) {
return buf.get(i);
}
public static void writeInt(ByteBuffer buf, int i, int v) {
assert buf.order() == Utils.NATIVE_BYTE_ORDER;
buf.putInt(i, v);
}
public static int readInt(ByteBuffer buf, int i) {
assert buf.order() == Utils.NATIVE_BYTE_ORDER;
return buf.getInt(i);
}
public static int readIntLE(ByteBuffer buf, int i) {
assert buf.order() == ByteOrder.LITTLE_ENDIAN;
return buf.getInt(i);
}
public static void writeLong(ByteBuffer buf, int i, long v) {
assert buf.order() == Utils.NATIVE_BYTE_ORDER;
buf.putLong(i, v);
}
public static long readLong(ByteBuffer buf, int i) {
assert buf.order() == Utils.NATIVE_BYTE_ORDER;
return buf.getLong(i);
}
public static long readLongLE(ByteBuffer buf, int i) {
assert buf.order() == ByteOrder.LITTLE_ENDIAN;
return buf.getLong(i);
}
public static void writeByte(ByteBuffer dest, int off, int i) {
dest.put(off, (byte) i);
}
public static void writeShortLE(ByteBuffer dest, int off, int i) {
dest.put(off, (byte) i);
dest.put(off + 1, (byte) (i >>> 8));
}
public static void checkNotReadOnly(ByteBuffer buffer) {
if (buffer.isReadOnly()) {
throw new ReadOnlyBufferException();
}
}
public static int readShortLE(ByteBuffer buf, int i) {
return (buf.get(i) & 0xFF) | ((buf.get(i + 1) & 0xFF) << 8);
}
}
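The *LE readers above assert the buffer's byte order instead of converting; inLittleEndianOrder() exists so callers can obtain a correctly ordered view of the same bytes without copying. A small sketch of that duplicate-and-reorder idea (illustrative class name):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public final class LittleEndianViewSketch {

    public static void main(String[] args) {
        ByteBuffer bigEndian = ByteBuffer.allocate(4).order(ByteOrder.BIG_ENDIAN);
        bigEndian.putInt(0, 0x01020304);

        // Same backing bytes, different interpretation; no copy is made.
        ByteBuffer littleEndian = bigEndian.duplicate().order(ByteOrder.LITTLE_ENDIAN);
        System.out.println(Integer.toHexString(littleEndian.getInt(0))); // 4030201
    }

    private LittleEndianViewSketch() {
    }
}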

Datei anzeigen

@ -1,153 +0,0 @@
package net.jpountz.util;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file has been modified for use in the FAWE project.
*/
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
/** FOR INTERNAL USE ONLY */
public enum Native {
;
private enum OS {
// Even on Windows, the default compiler from cpptasks (gcc) uses .so as a shared lib extension
WINDOWS("win32", "so"), LINUX("linux", "so"), MAC("darwin", "dylib"), SOLARIS("solaris", "so");
public final String name, libExtension;
private OS(String name, String libExtension) {
this.name = name;
this.libExtension = libExtension;
}
}
private static String arch() {
return System.getProperty("os.arch");
}
private static OS os() {
String osName = System.getProperty("os.name");
if (osName.contains("Linux")) {
return OS.LINUX;
} else if (osName.contains("Mac")) {
return OS.MAC;
} else if (osName.contains("Windows")) {
return OS.WINDOWS;
} else if (osName.contains("Solaris") || osName.contains("SunOS")) {
return OS.SOLARIS;
} else {
throw new UnsupportedOperationException("Unsupported operating system: "
+ osName);
}
}
private static String resourceName() {
OS os = os();
return "/" + os.name + "/" + arch() + "/liblz4-java." + os.libExtension;
}
private static boolean loaded = false;
public static synchronized boolean isLoaded() {
return loaded;
}
private static void cleanupOldTempLibs() {
String tempFolder = new File(System.getProperty("java.io.tmpdir")).getAbsolutePath();
File dir = new File(tempFolder);
File[] tempLibFiles = dir.listFiles((dir1, name) ->
name.startsWith("liblz4-java-") && !name.endsWith(".lck"));
if (tempLibFiles != null) {
for (File tempLibFile : tempLibFiles) {
File lckFile = new File(tempLibFile.getAbsolutePath() + ".lck");
if (!lckFile.exists()) {
try {
tempLibFile.delete();
}
catch (SecurityException e) {
System.err.println("Failed to delete old temp lib" + e.getMessage());
}
}
}
}
}
public static synchronized void load() {
if (loaded) {
return;
}
cleanupOldTempLibs();
// Try to load lz4-java (liblz4-java.so on Linux) from the java.library.path.
try {
System.loadLibrary("lz4-java");
loaded = true;
return;
} catch (UnsatisfiedLinkError ex) {
// Doesn't exist, so proceed to loading bundled library.
}
String resourceName = resourceName();
InputStream is = Native.class.getResourceAsStream(resourceName);
if (is == null) {
throw new UnsupportedOperationException("Unsupported OS/arch, cannot find " + resourceName + ". Please try building from source.");
}
File tempLib = null;
File tempLibLock = null;
try {
// Create the .lck file first to avoid a race condition
// with other concurrently running Java processes using lz4-java.
tempLibLock = File.createTempFile("liblz4-java-", "." + os().libExtension + ".lck");
tempLib = new File(tempLibLock.getAbsolutePath().replaceFirst(".lck$", ""));
// copy to tempLib
try (FileOutputStream out = new FileOutputStream(tempLib)) {
byte[] buf = new byte[4096];
while (true) {
int read = is.read(buf);
if (read == -1) {
break;
}
out.write(buf, 0, read);
}
}
System.load(tempLib.getAbsolutePath());
loaded = true;
} catch (IOException e) {
throw new ExceptionInInitializerError("Cannot unpack liblz4-java: " + e);
} finally {
if (!loaded) {
if (tempLib != null && tempLib.exists()) {
if (!tempLib.delete()) {
throw new ExceptionInInitializerError("Cannot unpack liblz4-java / cannot delete a temporary native library " + tempLib);
}
}
if (tempLibLock != null && tempLibLock.exists()) {
if (!tempLibLock.delete()) {
throw new ExceptionInInitializerError("Cannot unpack liblz4-java / cannot delete a temporary lock file " + tempLibLock);
}
}
} else {
tempLib.deleteOnExit();
tempLibLock.deleteOnExit();
}
}
}
}
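load() above is only reached through the JNI-backed code path; lz4-java's own LZ4Factory.fastestInstance() already falls back to the pure-Java implementation when native loading fails. A minimal sketch of that selection, with the class name illustrative only:

import net.jpountz.lz4.LZ4Factory;

public final class FactorySelectionSketch {

    public static LZ4Factory pickFactory() {
        try {
            return LZ4Factory.nativeInstance(); // triggers the native library load
        } catch (Throwable t) {                 // e.g. unsupported OS/arch, missing bundled lib
            return LZ4Factory.safeInstance();   // pure-Java implementation
        }
    }

    public static void main(String[] args) {
        System.out.println(pickFactory());
    }

    private FactorySelectionSketch() {
    }
}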

Datei anzeigen

@ -1,95 +0,0 @@
package net.jpountz.util;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteOrder;
public enum SafeUtils {
;
public static void checkRange(byte[] buf, int off) {
if (off < 0 || off >= buf.length) {
throw new ArrayIndexOutOfBoundsException(off);
}
}
public static void checkRange(byte[] buf, int off, int len) {
checkLength(len);
if (len > 0) {
checkRange(buf, off);
checkRange(buf, off + len - 1);
}
}
public static void checkLength(int len) {
if (len < 0) {
throw new IllegalArgumentException("lengths must be >= 0");
}
}
public static byte readByte(byte[] buf, int i) {
return buf[i];
}
public static int readIntBE(byte[] buf, int i) {
return ((buf[i] & 0xFF) << 24) | ((buf[i + 1] & 0xFF) << 16) | ((buf[i + 2] & 0xFF) << 8) | (buf[i + 3] & 0xFF);
}
public static int readIntLE(byte[] buf, int i) {
return (buf[i] & 0xFF) | ((buf[i + 1] & 0xFF) << 8) | ((buf[i + 2] & 0xFF) << 16) | ((buf[i + 3] & 0xFF) << 24);
}
public static int readInt(byte[] buf, int i) {
if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
return readIntBE(buf, i);
} else {
return readIntLE(buf, i);
}
}
public static long readLongLE(byte[] buf, int i) {
return (buf[i] & 0xFFL) | ((buf[i + 1] & 0xFFL) << 8) | ((buf[i + 2] & 0xFFL) << 16) | ((buf[i + 3] & 0xFFL) << 24)
| ((buf[i + 4] & 0xFFL) << 32) | ((buf[i + 5] & 0xFFL) << 40) | ((buf[i + 6] & 0xFFL) << 48) | ((buf[i + 7] & 0xFFL) << 56);
}
public static void writeShortLE(byte[] buf, int off, int v) {
buf[off++] = (byte) v;
buf[off++] = (byte) (v >>> 8);
}
public static void writeInt(int[] buf, int off, int v) {
buf[off] = v;
}
public static int readInt(int[] buf, int off) {
return buf[off];
}
public static void writeByte(byte[] dest, int off, int i) {
dest[off] = (byte) i;
}
public static void writeShort(short[] buf, int off, int v) {
buf[off] = (short) v;
}
public static int readShortLE(byte[] buf, int i) {
return (buf[i] & 0xFF) | ((buf[i + 1] & 0xFF) << 8);
}
public static int readShort(short[] buf, int off) {
return buf[off] & 0xFFFF;
}
}
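The BE/LE readers above differ only in byte order; a tiny check of both on the same bytes (illustrative class name):

import net.jpountz.util.SafeUtils;

public final class EndianReadSketch {

    public static void main(String[] args) {
        byte[] buf = {0x01, 0x02, 0x03, 0x04};
        System.out.println(Integer.toHexString(SafeUtils.readIntBE(buf, 0))); // 1020304
        System.out.println(Integer.toHexString(SafeUtils.readIntLE(buf, 0))); // 4030201
    }

    private EndianReadSketch() {
    }
}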

Datei anzeigen

@ -1,147 +0,0 @@
package net.jpountz.util;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sun.misc.Unsafe;
import java.lang.reflect.Field;
import java.nio.ByteOrder;
import static net.jpountz.util.Utils.NATIVE_BYTE_ORDER;
public enum UnsafeUtils {
;
private static final Unsafe UNSAFE;
private static final long BYTE_ARRAY_OFFSET;
private static final int BYTE_ARRAY_SCALE;
private static final long INT_ARRAY_OFFSET;
private static final int INT_ARRAY_SCALE;
private static final long SHORT_ARRAY_OFFSET;
private static final int SHORT_ARRAY_SCALE;
static {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
UNSAFE = (Unsafe) theUnsafe.get(null);
BYTE_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
BYTE_ARRAY_SCALE = UNSAFE.arrayIndexScale(byte[].class);
INT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(int[].class);
INT_ARRAY_SCALE = UNSAFE.arrayIndexScale(int[].class);
SHORT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(short[].class);
SHORT_ARRAY_SCALE = UNSAFE.arrayIndexScale(short[].class);
} catch (IllegalAccessException | NoSuchFieldException | SecurityException e) {
throw new ExceptionInInitializerError("Cannot access Unsafe");
}
}
public static void checkRange(byte[] buf, int off) {
SafeUtils.checkRange(buf, off);
}
public static void checkRange(byte[] buf, int off, int len) {
SafeUtils.checkRange(buf, off, len);
}
public static void checkLength(int len) {
SafeUtils.checkLength(len);
}
public static byte readByte(byte[] src, int srcOff) {
return UNSAFE.getByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff);
}
public static void writeByte(byte[] src, int srcOff, byte value) {
UNSAFE.putByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff, (byte) value);
}
public static void writeByte(byte[] src, int srcOff, int value) {
writeByte(src, srcOff, (byte) value);
}
public static long readLong(byte[] src, int srcOff) {
return UNSAFE.getLong(src, BYTE_ARRAY_OFFSET + srcOff);
}
public static long readLongLE(byte[] src, int srcOff) {
long i = readLong(src, srcOff);
if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
i = Long.reverseBytes(i);
}
return i;
}
public static void writeLong(byte[] dest, int destOff, long value) {
UNSAFE.putLong(dest, BYTE_ARRAY_OFFSET + destOff, value);
}
public static int readInt(byte[] src, int srcOff) {
return UNSAFE.getInt(src, BYTE_ARRAY_OFFSET + srcOff);
}
public static int readIntLE(byte[] src, int srcOff) {
int i = readInt(src, srcOff);
if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
i = Integer.reverseBytes(i);
}
return i;
}
public static void writeInt(byte[] dest, int destOff, int value) {
UNSAFE.putInt(dest, BYTE_ARRAY_OFFSET + destOff, value);
}
public static short readShort(byte[] src, int srcOff) {
return UNSAFE.getShort(src, BYTE_ARRAY_OFFSET + srcOff);
}
public static int readShortLE(byte[] src, int srcOff) {
short s = readShort(src, srcOff);
if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
s = Short.reverseBytes(s);
}
return s & 0xFFFF;
}
public static void writeShort(byte[] dest, int destOff, short value) {
UNSAFE.putShort(dest, BYTE_ARRAY_OFFSET + destOff, value);
}
public static void writeShortLE(byte[] buf, int off, int v) {
writeByte(buf, off, (byte) v);
writeByte(buf, off + 1, (byte) (v >>> 8));
}
public static int readInt(int[] src, int srcOff) {
return UNSAFE.getInt(src, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * srcOff);
}
public static void writeInt(int[] dest, int destOff, int value) {
UNSAFE.putInt(dest, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * destOff, value);
}
public static int readShort(short[] src, int srcOff) {
return UNSAFE.getShort(src, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * srcOff) & 0xFFFF;
}
public static void writeShort(short[] dest, int destOff, int value) {
UNSAFE.putShort(dest, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * destOff, (short) value);
}
public static Unsafe getUNSAFE() {
return UNSAFE;
}
}

Datei anzeigen

@ -1,36 +0,0 @@
package net.jpountz.util;
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.ByteOrder;
public enum Utils {
;
public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder();
private static final boolean unalignedAccessAllowed;
static {
String arch = System.getProperty("os.arch");
unalignedAccessAllowed = arch.equals("i386") || arch.equals("x86")
|| arch.equals("amd64") || arch.equals("x86_64");
}
public static boolean isUnalignedAccessAllowed() {
return unalignedAccessAllowed;
}
}