Skip to content

Commit be6cd82

Browse files
author
Michael Karsten
committed
refactor: leave existing methods in place for backward compatibility. Add Zstd as a built-in option.
1 parent 7e3cca7 commit be6cd82

File tree

3 files changed

+208
-28
lines changed

3 files changed

+208
-28
lines changed

query-builder/src/main/java/com/datastax/oss/driver/api/querybuilder/schema/RelationOptions.java

+92-22
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919

2020
import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.RowsPerPartition;
2121

22+
import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
2223
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
2324
import com.datastax.oss.driver.api.querybuilder.schema.compaction.CompactionStrategy;
2425
import com.datastax.oss.driver.shaded.guava.common.collect.ImmutableMap;
@@ -97,22 +98,32 @@ default SelfT withCompaction(@NonNull CompactionStrategy<?> compactionStrategy)
9798
}
9899

99100
/**
100-
* Configures compression using the LZ4 algorithm with the given chunk length and crc check
101-
* chance.
102-
*
103-
* @see #withCompression(String, int, double)
101+
* @deprecated This method only exists for backward compatibility. Use {@link
102+
* RelationOptions#withLZ4Compression(int)} instead.
104103
*/
104+
@Deprecated
105105
@NonNull
106106
@CheckReturnValue
107107
default SelfT withLZ4Compression(int chunkLengthKB, double crcCheckChance) {
108108
return withCompression("LZ4Compressor", chunkLengthKB, crcCheckChance);
109109
}
110110

111+
/**
112+
* Configures compression using the LZ4 algorithm with the given chunk length.
113+
*
114+
* @see #withCompression(String, int)
115+
*/
116+
@NonNull
117+
@CheckReturnValue
118+
default SelfT withLZ4Compression(int chunkLengthKB) {
119+
return withCompression("LZ4Compressor", chunkLengthKB);
120+
}
121+
111122
/**
112123
* Configures compression using the LZ4 algorithm using the default configuration (64kb
113-
* chunk_length, and 1.0 crc_check_chance).
124+
* chunk_length).
114125
*
115-
* @see #withCompression(String, int, double)
126+
* @see #withCompression(String, int)
116127
*/
117128
@NonNull
118129
@CheckReturnValue
@@ -121,22 +132,55 @@ default SelfT withLZ4Compression() {
121132
}
122133

123134
/**
124-
* Configures compression using the Snappy algorithm with the given chunk length and crc check
125-
* chance.
135+
* Configures compression using the Zstd algorithm with the given chunk length.
136+
*
137+
* @see #withCompression(String, int)
138+
*/
139+
@NonNull
140+
@CheckReturnValue
141+
default SelfT withZstdCompression(int chunkLengthKB) {
142+
return withCompression("org.apache.cassandra.io.compress.ZstdCompressor", chunkLengthKB);
143+
}
144+
145+
/**
146+
* Configures compression using the Zstd algorithm using the default configuration (64kb
147+
* chunk_length).
126148
*
127-
* @see #withCompression(String, int, double)
149+
* @see #withCompression(String, int)
150+
*/
151+
@NonNull
152+
@CheckReturnValue
153+
default SelfT withZstdCompression() {
154+
return withCompression("org.apache.cassandra.io.compress.ZstdCompressor");
155+
}
156+
157+
/**
158+
* @deprecated This method only exists for backward compatibility. Use {@link
159+
* RelationOptions#withSnappyCompression(int)} instead.
128160
*/
161+
@Deprecated
129162
@NonNull
130163
@CheckReturnValue
131164
default SelfT withSnappyCompression(int chunkLengthKB, double crcCheckChance) {
132165
return withCompression("SnappyCompressor", chunkLengthKB, crcCheckChance);
133166
}
134167

168+
/**
169+
* Configures compression using the Snappy algorithm with the given chunk length.
170+
*
171+
* @see #withCompression(String, int)
172+
*/
173+
@NonNull
174+
@CheckReturnValue
175+
default SelfT withSnappyCompression(int chunkLengthKB) {
176+
return withCompression("SnappyCompressor", chunkLengthKB);
177+
}
178+
135179
/**
136180
* Configures compression using the Snappy algorithm using the default configuration (64kb
137-
* chunk_length, and 1.0 crc_check_chance).
181+
* chunk_length).
138182
*
139-
* @see #withCompression(String, int, double)
183+
* @see #withCompression(String, int)
140184
*/
141185
@NonNull
142186
@CheckReturnValue
@@ -145,22 +189,32 @@ default SelfT withSnappyCompression() {
145189
}
146190

147191
/**
148-
* Configures compression using the Deflate algorithm with the given chunk length and crc check
149-
* chance.
150-
*
151-
* @see #withCompression(String, int, double)
192+
* @deprecated This method only exists for backward compatibility. Use {@link
193+
* RelationOptions#withDeflateCompression(int)} instead.
152194
*/
195+
@Deprecated
153196
@NonNull
154197
@CheckReturnValue
155198
default SelfT withDeflateCompression(int chunkLengthKB, double crcCheckChance) {
156199
return withCompression("DeflateCompressor", chunkLengthKB, crcCheckChance);
157200
}
158201

202+
/**
203+
* Configures compression using the Deflate algorithm with the given chunk length.
204+
*
205+
* @see #withCompression(String, int)
206+
*/
207+
@NonNull
208+
@CheckReturnValue
209+
default SelfT withDeflateCompression(int chunkLengthKB) {
210+
return withCompression("DeflateCompressor", chunkLengthKB);
211+
}
212+
159213
/**
160214
* Configures compression using the Deflate algorithm using the default configuration (64kb
161215
* chunk_length).
162216
*
163-
* @see #withCompression(String, int, double)
217+
* @see #withCompression(String, int)
164218
*/
165219
@NonNull
166220
@CheckReturnValue
@@ -170,13 +224,13 @@ default SelfT withDeflateCompression() {
170224

171225
/**
172226
* Configures compression using the given algorithm using the default configuration (64kb
173-
* chunk_length, and 1.0 crc_check_chance).
227+
* chunk_length).
174228
*
175229
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
176230
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
177231
* #withDeflateCompression()}.
178232
*
179-
* @see #withCompression(String, int, double)
233+
* @see #withCompression(String, int)
180234
*/
181235
@NonNull
182236
@CheckReturnValue
@@ -185,27 +239,43 @@ default SelfT withCompression(@NonNull String compressionAlgorithmName) {
185239
}
186240

187241
/**
188-
* Configures compression using the given algorithm, chunk length and crc check chance.
242+
* Configures compression using the given algorithm and chunk length.
189243
*
190244
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
191245
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
192246
* #withDeflateCompression()}.
193247
*
194248
* @param compressionAlgorithmName The class name of the compression algorithm.
195249
* @param chunkLengthKB The chunk length in KB of compression blocks. Defaults to 64.
196-
* @param crcCheckChance The probability (0.0 to 1.0) that checksum will be checked on each read.
197-
* Defaults to 1.0.
198250
*/
199251
@NonNull
200252
@CheckReturnValue
253+
default SelfT withCompression(
254+
@NonNull String compressionAlgorithmName, int chunkLengthKB) {
255+
return withOption(
256+
"compression",
257+
ImmutableMap.of(
258+
"class",
259+
compressionAlgorithmName,
260+
"chunk_length_in_kb",
261+
chunkLengthKB));
262+
}
263+
264+
/**
265+
* @deprecated This method only exists for backward compatibility. Use {@link
266+
* RelationOptions#withCompression(String, int)} instead.
267+
*/
268+
@NonNull
269+
@CheckReturnValue
270+
@Deprecated
201271
default SelfT withCompression(
202272
@NonNull String compressionAlgorithmName, int chunkLengthKB, double crcCheckChance) {
203273
return withOption(
204274
"compression",
205275
ImmutableMap.of(
206276
"class",
207277
compressionAlgorithmName,
208-
"chunk_length_in_kb",
278+
"chunk_length_kb",
209279
chunkLengthKB,
210280
"crc_check_chance",
211281
crcCheckChance));

query-builder/src/test/java/com/datastax/dse/driver/api/querybuilder/schema/CreateDseTableTest.java

+58-3
Original file line numberDiff line numberDiff line change
@@ -195,13 +195,46 @@ public void should_generate_create_table_lz4_compression() {
195195

196196
@Test
197197
public void should_generate_create_table_lz4_compression_options() {
198+
assertThat(
199+
createDseTable("bar")
200+
.withPartitionKey("k", DataTypes.INT)
201+
.withColumn("v", DataTypes.TEXT)
202+
.withLZ4Compression(1024))
203+
.hasCql(
204+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024}");
205+
}
206+
207+
@Test
208+
public void should_generate_create_table_lz4_compression_options_crc() {
198209
assertThat(
199210
createDseTable("bar")
200211
.withPartitionKey("k", DataTypes.INT)
201212
.withColumn("v", DataTypes.TEXT)
202213
.withLZ4Compression(1024, .5))
203214
.hasCql(
204-
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_in_kb':1024,'crc_check_chance':0.5}");
215+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'LZ4Compressor','chunk_length_kb':1024,'crc_check_chance':0.5}");
216+
}
217+
218+
@Test
219+
public void should_generate_create_table_zstd_compression() {
220+
assertThat(
221+
createDseTable("bar")
222+
.withPartitionKey("k", DataTypes.INT)
223+
.withColumn("v", DataTypes.TEXT)
224+
.withZstdCompression())
225+
.hasCql(
226+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor'}");
227+
}
228+
229+
@Test
230+
public void should_generate_create_table_zstd_compression_options() {
231+
assertThat(
232+
createDseTable("bar")
233+
.withPartitionKey("k", DataTypes.INT)
234+
.withColumn("v", DataTypes.TEXT)
235+
.withZstdCompression(1024))
236+
.hasCql(
237+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'org.apache.cassandra.io.compress.ZstdCompressor','chunk_length_in_kb':1024}");
205238
}
206239

207240
@Test
@@ -217,13 +250,24 @@ public void should_generate_create_table_snappy_compression() {
217250

218251
@Test
219252
public void should_generate_create_table_snappy_compression_options() {
253+
assertThat(
254+
createDseTable("bar")
255+
.withPartitionKey("k", DataTypes.INT)
256+
.withColumn("v", DataTypes.TEXT)
257+
.withSnappyCompression(2048))
258+
.hasCql(
259+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048}");
260+
}
261+
262+
@Test
263+
public void should_generate_create_table_snappy_compression_options_crc() {
220264
assertThat(
221265
createDseTable("bar")
222266
.withPartitionKey("k", DataTypes.INT)
223267
.withColumn("v", DataTypes.TEXT)
224268
.withSnappyCompression(2048, .25))
225269
.hasCql(
226-
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_in_kb':2048,'crc_check_chance':0.25}");
270+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'SnappyCompressor','chunk_length_kb':2048,'crc_check_chance':0.25}");
227271
}
228272

229273
@Test
@@ -239,13 +283,24 @@ public void should_generate_create_table_deflate_compression() {
239283

240284
@Test
241285
public void should_generate_create_table_deflate_compression_options() {
286+
assertThat(
287+
createDseTable("bar")
288+
.withPartitionKey("k", DataTypes.INT)
289+
.withColumn("v", DataTypes.TEXT)
290+
.withDeflateCompression(4096))
291+
.hasCql(
292+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096}");
293+
}
294+
295+
@Test
296+
public void should_generate_create_table_deflate_compression_options_crc() {
242297
assertThat(
243298
createDseTable("bar")
244299
.withPartitionKey("k", DataTypes.INT)
245300
.withColumn("v", DataTypes.TEXT)
246301
.withDeflateCompression(4096, .1))
247302
.hasCql(
248-
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_in_kb':4096,'crc_check_chance':0.1}");
303+
"CREATE TABLE bar (k int PRIMARY KEY,v text) WITH compression={'class':'DeflateCompressor','chunk_length_kb':4096,'crc_check_chance':0.1}");
249304
}
250305

251306
@Test

0 commit comments

Comments
 (0)