Skip to content

CASSJAVA-89: Remove the deprecated compression option chunk_length_kb #2029

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: 4.x
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -97,22 +97,32 @@ default SelfT withCompaction(@NonNull CompactionStrategy<?> compactionStrategy)
}

/**
 * Configures LZ4 compression with an explicit chunk length and crc check chance.
 *
 * @see #withCompression(String, int, double)
 * @deprecated This method only exists for backward compatibility. Use {@link
 *     #withLZ4Compression(int)} instead.
 */
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withLZ4Compression(int chunkLengthKB, double crcCheckChance) {
  final String algorithm = "LZ4Compressor";
  return withCompression(algorithm, chunkLengthKB, crcCheckChance);
}

/**
 * Configures LZ4 compression with an explicit chunk length.
 *
 * @see #withCompression(String, int)
 */
@NonNull
@CheckReturnValue
default SelfT withLZ4Compression(int chunkLengthKB) {
  final String algorithm = "LZ4Compressor";
  return withCompression(algorithm, chunkLengthKB);
}

/**
* Configures compression using the LZ4 algorithm using the default configuration (64kb
* chunk_length, and 1.0 crc_check_chance).
* chunk_length).
*
* @see #withCompression(String, int, double)
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
Expand All @@ -121,22 +131,55 @@ default SelfT withLZ4Compression() {
}

/**
* Configures compression using the Snappy algorithm with the given chunk length and crc check
* chance.
* Configures compression using the Zstd algorithm with the given chunk length.
*
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withZstdCompression(int chunkLengthKB) {
  // Unlike LZ4/Snappy/Deflate, the fully-qualified compressor class name is used here.
  final String algorithm = "org.apache.cassandra.io.compress.ZstdCompressor";
  return withCompression(algorithm, chunkLengthKB);
}

/**
* Configures compression using the Zstd algorithm using the default configuration (64kb
* chunk_length).
*
* @see #withCompression(String, int, double)
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withZstdCompression() {
  // Unlike LZ4/Snappy/Deflate, the fully-qualified compressor class name is used here.
  final String algorithm = "org.apache.cassandra.io.compress.ZstdCompressor";
  return withCompression(algorithm);
}

/**
 * Configures compression using the Snappy algorithm with the given chunk length and crc check
 * chance.
 *
 * @see #withCompression(String, int, double)
 * @deprecated This method only exists for backward compatibility. Use {@link
 *     #withSnappyCompression(int)} instead.
 */
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withSnappyCompression(int chunkLengthKB, double crcCheckChance) {
return withCompression("SnappyCompressor", chunkLengthKB, crcCheckChance);
}

/**
 * Configures Snappy compression with an explicit chunk length.
 *
 * @see #withCompression(String, int)
 */
@NonNull
@CheckReturnValue
default SelfT withSnappyCompression(int chunkLengthKB) {
  final String algorithm = "SnappyCompressor";
  return withCompression(algorithm, chunkLengthKB);
}

/**
* Configures compression using the Snappy algorithm using the default configuration (64kb
* chunk_length, and 1.0 crc_check_chance).
* chunk_length).
*
* @see #withCompression(String, int, double)
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
Expand All @@ -145,22 +188,32 @@ default SelfT withSnappyCompression() {
}

/**
* Configures compression using the Deflate algorithm with the given chunk length and crc check
* chance.
*
* @see #withCompression(String, int, double)
* @deprecated This method only exists for backward compatibility. Use {@link
* #withDeflateCompression(int)} instead.
*/
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withDeflateCompression(int chunkLengthKB, double crcCheckChance) {
  final String algorithm = "DeflateCompressor";
  return withCompression(algorithm, chunkLengthKB, crcCheckChance);
}

/**
 * Configures Deflate compression with an explicit chunk length.
 *
 * @see #withCompression(String, int)
 */
@NonNull
@CheckReturnValue
default SelfT withDeflateCompression(int chunkLengthKB) {
  final String algorithm = "DeflateCompressor";
  return withCompression(algorithm, chunkLengthKB);
}

/**
* Configures compression using the Deflate algorithm using the default configuration (64kb
* chunk_length, and 1.0 crc_check_chance).
* chunk_length).
*
* @see #withCompression(String, int, double)
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
Expand All @@ -170,13 +223,13 @@ default SelfT withDeflateCompression() {

/**
* Configures compression using the given algorithm using the default configuration (64kb
* chunk_length, and 1.0 crc_check_chance).
* chunk_length).
*
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
* #withDeflateCompression()}.
*
* @see #withCompression(String, int, double)
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
Expand All @@ -185,19 +238,30 @@ default SelfT withCompression(@NonNull String compressionAlgorithmName) {
}

/**
* Configures compression using the given algorithm, chunk length and crc check chance.
* Configures compression using the given algorithm and chunk length.
*
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
* #withDeflateCompression()}.
*
* @param compressionAlgorithmName The class name of the compression algorithm.
* @param chunkLengthKB The chunk length in KB of compression blocks. Defaults to 64.
* @param crcCheckChance The probability (0.0 to 1.0) that checksum will be checked on each read.
* Defaults to 1.0.
*/
@NonNull
@CheckReturnValue
default SelfT withCompression(@NonNull String compressionAlgorithmName, int chunkLengthKB) {
  // 'chunk_length_in_kb' replaces the deprecated 'chunk_length_kb' option (CASSJAVA-89).
  ImmutableMap<String, Object> compressionOptions =
      ImmutableMap.of("class", compressionAlgorithmName, "chunk_length_in_kb", chunkLengthKB);
  return withOption("compression", compressionOptions);
}

/**
* @deprecated This method only exists for backward compatibility. Use {@link
* #withCompression(String, int)} instead.
*/
@NonNull
@CheckReturnValue
@Deprecated
default SelfT withCompression(
@NonNull String compressionAlgorithmName, int chunkLengthKB, double crcCheckChance) {
return withOption(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,17 @@ public void should_generate_create_table_lz4_compression() {

@Test
public void should_generate_create_table_lz4_compression_options() {
  // withLZ4Compression(int) delegates to withCompression(String, int), which emits the
  // non-deprecated 'chunk_length_in_kb' option (CASSJAVA-89) — not the legacy
  // 'chunk_length_kb' that only the deprecated crc-check overload still produces.
  // (The parallel non-DSE test expects 'chunk_length_in_kb' as well.)
  assertThat(
          createDseTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withLZ4Compression(1024))
      .hasCql(
          "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}");
}

@Test
public void should_generate_create_table_lz4_compression_options_crc() {
assertThat(
createDseTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand All @@ -204,6 +215,28 @@ public void should_generate_create_table_lz4_compression_options() {
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}");
}

@Test
public void should_generate_create_table_zstd_compression() {
  // Default Zstd compression: no chunk-length option appears in the generated CQL.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor'}";
  assertThat(
          createDseTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withZstdCompression())
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_zstd_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor','chunk_length_in_kb':1024}";
  assertThat(
          createDseTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withZstdCompression(1024))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_snappy_compression() {
assertThat(
Expand All @@ -217,6 +250,17 @@ public void should_generate_create_table_snappy_compression() {

@Test
public void should_generate_create_table_snappy_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}";
  assertThat(
          createDseTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withSnappyCompression(2048))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_snappy_compression_options_crc() {
assertThat(
createDseTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand All @@ -239,6 +283,17 @@ public void should_generate_create_table_deflate_compression() {

@Test
public void should_generate_create_table_deflate_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}";
  assertThat(
          createDseTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withDeflateCompression(4096))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_deflate_compression_options_crc() {
assertThat(
createDseTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,17 @@ public void should_generate_create_table_lz4_compression() {

@Test
public void should_generate_create_table_lz4_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024}";
  assertThat(
          createTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withLZ4Compression(1024))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_lz4_compression_options_crc() {
assertThat(
createTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand All @@ -208,6 +219,28 @@ public void should_generate_create_table_lz4_compression_options() {
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}");
}

@Test
public void should_generate_create_table_zstd_compression() {
  // Default Zstd compression: no chunk-length option appears in the generated CQL.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor'}";
  assertThat(
          createTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withZstdCompression())
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_zstd_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor','chunk_length_in_kb':1024}";
  assertThat(
          createTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withZstdCompression(1024))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_snappy_compression() {
assertThat(
Expand All @@ -221,6 +254,17 @@ public void should_generate_create_table_snappy_compression() {

@Test
public void should_generate_create_table_snappy_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}";
  assertThat(
          createTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withSnappyCompression(2048))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_snappy_compression_options_crc() {
assertThat(
createTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand All @@ -243,6 +287,17 @@ public void should_generate_create_table_deflate_compression() {

@Test
public void should_generate_create_table_deflate_compression_options() {
  // Explicit chunk length is rendered as the non-deprecated 'chunk_length_in_kb' option.
  String expectedCql =
      "CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}";
  assertThat(
          createTable("bar")
              .withPartitionKey("k", DataTypes.INT)
              .withColumn("v", DataTypes.TEXT)
              .withDeflateCompression(4096))
      .hasCql(expectedCql);
}

@Test
public void should_generate_create_table_deflate_compression_options_crc() {
assertThat(
createTable("bar")
.withPartitionKey("k", DataTypes.INT)
Expand Down