CASSJAVA-89 Fix remove deprecated config chunk_length_kb #2029

Open · wants to merge 1 commit into base: 4.x
RelationOptionsIT.java (new file)
@@ -0,0 +1,149 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.oss.driver.querybuilder;

import static org.assertj.core.api.Assertions.assertThat;

import com.datastax.oss.driver.api.core.CqlSession;
import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;
import com.datastax.oss.driver.api.testinfra.ccm.CcmRule;
import com.datastax.oss.driver.api.testinfra.requirement.BackendRequirement;
import com.datastax.oss.driver.api.testinfra.requirement.BackendType;
import com.datastax.oss.driver.api.testinfra.session.SessionRule;
import com.datastax.oss.driver.api.testinfra.session.SessionUtils;
import com.datastax.oss.driver.categories.ParallelizableTests;
import java.time.Duration;
import org.junit.After;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

@Category(ParallelizableTests.class)
public class RelationOptionsIT {
private static final CcmRule CCM_RULE = CcmRule.getInstance();

private static final SessionRule<CqlSession> SESSION_RULE =
SessionRule.builder(CCM_RULE)
.withConfigLoader(
SessionUtils.configLoaderBuilder()
.withDuration(DefaultDriverOption.REQUEST_TIMEOUT, Duration.ofSeconds(30))
.build())
.build();

@ClassRule
public static final TestRule CHAIN = RuleChain.outerRule(CCM_RULE).around(SESSION_RULE);

@After
public void clearTable() {
SESSION_RULE.session().execute("DROP TABLE relation_options");
}

@Test
@BackendRequirement(
type = BackendType.CASSANDRA,
minInclusive = "3.0",
description = "CRC check chance was moved to top level table in Cassandra 3.0")
public void should_create_table_with_crc_check_chance() {
try (CqlSession session = session()) {
session.execute(
SchemaBuilder.createTable("relation_options")
.withPartitionKey("id", DataTypes.INT)
.withColumn("name", DataTypes.TEXT)
.withColumn("age", DataTypes.INT)
.withCRCCheckChance(0.8)
.build());
KeyspaceMetadata keyspaceMetadata =
session
.getMetadata()
.getKeyspace(SESSION_RULE.keyspace())
.orElseThrow(AssertionError::new);
String describeOutput = keyspaceMetadata.describeWithChildren(true).trim();

assertThat(describeOutput).contains("crc_check_chance = 0.8");
}
}

@Test
@BackendRequirement(
type = BackendType.CASSANDRA,
minInclusive = "5.0",
description = "chunk_length_kb was renamed to chunk_length_in_kb in Cassandra 5.0")
public void should_create_table_with_chunk_length_in_kb() {
try (CqlSession session = session()) {
session.execute(
SchemaBuilder.createTable("relation_options")
.withPartitionKey("id", DataTypes.INT)
.withColumn("name", DataTypes.TEXT)
.withColumn("age", DataTypes.INT)
.withLZ4Compression(4096)
.build());
KeyspaceMetadata keyspaceMetadata =
session
.getMetadata()
.getKeyspace(SESSION_RULE.keyspace())
.orElseThrow(AssertionError::new);
String describeOutput = keyspaceMetadata.describeWithChildren(true).trim();

assertThat(describeOutput)
.contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'");
assertThat(describeOutput).contains("'chunk_length_in_kb':'4096'");
}
}

@Test
@BackendRequirement(
type = BackendType.CASSANDRA,
maxExclusive = "5.0",
description = "chunk_length_kb was renamed to chunk_length_in_kb in Cassandra 5.0")
public void should_create_table_with_deprecated_options() {
try (CqlSession session = session()) {
session.execute(
SchemaBuilder.createTable("relation_options")
.withPartitionKey("id", DataTypes.INT)
.withColumn("name", DataTypes.TEXT)
.withColumn("age", DataTypes.INT)
.withLZ4Compression(4096, 0.8)
.build());
KeyspaceMetadata keyspaceMetadata =
session
.getMetadata()
.getKeyspace(SESSION_RULE.keyspace())
.orElseThrow(AssertionError::new);
String describeOutput = keyspaceMetadata.describeWithChildren(true).trim();

assertThat(describeOutput)
.contains("'class':'org.apache.cassandra.io.compress.LZ4Compressor'");
assertThat(describeOutput).contains("'chunk_length_kb':'4096'");
assertThat(describeOutput).contains("'crc_check_chance':'0.8'");
}
}

@SuppressWarnings("unchecked")
private CqlSession session() {
return (CqlSession)
SessionUtils.baseBuilder()
.addContactEndPoints(CCM_RULE.getContactPoints())
.withKeyspace(SESSION_RULE.keyspace())
.build();
}
}
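The integration test above checks the server-side describe output. The interface changes below also add Zstd variants (withZstdCompression) that this test does not exercise; the following is a minimal, hypothetical sketch of the client-side CQL those new options are expected to generate. The class name ZstdCompressionExample is illustrative, the exact rendering of the compression map is an assumption, and ZstdCompressor itself is only available server-side from Cassandra 4.0.

import com.datastax.oss.driver.api.core.type.DataTypes;
import com.datastax.oss.driver.api.querybuilder.SchemaBuilder;

public class ZstdCompressionExample {
  public static void main(String[] args) {
    // withZstdCompression(int) is expected to set
    // compression = {'class': 'ZstdCompressor', 'chunk_length_in_kb': <n>};
    // the no-arg variant relies on the default 64kb chunk_length (per the javadoc below).
    String cql =
        SchemaBuilder.createTable("relation_options")
            .withPartitionKey("id", DataTypes.INT)
            .withColumn("name", DataTypes.TEXT)
            .withZstdCompression(4096)
            .asCql();
    System.out.println(cql);
  }
}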
@@ -58,6 +58,18 @@ default SelfT withCDC(boolean enabled) {
return withOption("cdc", enabled);
}

/**
* Defines the crc check chance: the probability (0.0 to 1.0) that checksums are verified on
* read.
*
* <p>Note that using this option with a version of Apache Cassandra older than 3.0 will raise a
* syntax error.
*/
@NonNull
@CheckReturnValue
default SelfT withCRCCheckChance(double crcCheckChance) {
return withOption("crc_check_chance", crcCheckChance);
}

/**
* Defines the caching criteria.
*
@@ -97,22 +109,32 @@ default SelfT withCompaction(@NonNull CompactionStrategy<?> compactionStrategy)
}

/**
* Configures compression using the LZ4 algorithm with the given chunk length and crc check
* chance.
*
* @see #withCompression(String, int, double)
* @deprecated This method only exists for backward compatibility. Will not work with Apache
* Cassandra 5.0 or later due to removal of deprecated table properties (<a
* href="https://issues.apache.org/jira/browse/CASSANDRA-18742">CASSANDRA-18742</a>). Use
* {@link #withLZ4Compression(int)} instead.
*/
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withLZ4Compression(int chunkLengthKB, double crcCheckChance) {
return withCompression("LZ4Compressor", chunkLengthKB, crcCheckChance);
}

/**
* Configures compression using the LZ4 algorithm with the given chunk length.
*
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withLZ4Compression(int chunkLengthKB) {
return withCompression("LZ4Compressor", chunkLengthKB);
}

/**
* Configures compression using the LZ4 algorithm using the default configuration (64kb
- * chunk_length, and 1.0 crc_check_chance).
+ * chunk_length).
*
- * @see #withCompression(String, int, double)
+ * @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
@@ -121,22 +143,57 @@ default SelfT withLZ4Compression() {
}

/**
- * Configures compression using the Snappy algorithm with the given chunk length and crc check
- * chance.
+ * Configures compression using the Zstd algorithm with the given chunk length.
*
- * @see #withCompression(String, int, double)
+ * @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withZstdCompression(int chunkLengthKB) {
return withCompression("ZstdCompressor", chunkLengthKB);
}

/**
* Configures compression using the Zstd algorithm using the default configuration (64kb
* chunk_length).
*
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withZstdCompression() {
return withCompression("ZstdCompressor");
}

/**
* @deprecated This method only exists for backward compatibility. Will not work with Apache
* Cassandra 5.0 or later due to removal of deprecated table properties (<a
* href="https://issues.apache.org/jira/browse/CASSANDRA-18742">CASSANDRA-18742</a>). Use
* {@link #withSnappyCompression(int)} instead.
*/
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withSnappyCompression(int chunkLengthKB, double crcCheckChance) {
return withCompression("SnappyCompressor", chunkLengthKB, crcCheckChance);
}

/**
* Configures compression using the Snappy algorithm with the given chunk length.
*
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withSnappyCompression(int chunkLengthKB) {
return withCompression("SnappyCompressor", chunkLengthKB);
}

/**
* Configures compression using the Snappy algorithm using the default configuration (64kb
- * chunk_length, and 1.0 crc_check_chance).
+ * chunk_length).
*
- * @see #withCompression(String, int, double)
+ * @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
@@ -145,22 +202,34 @@ default SelfT withSnappyCompression() {
}

/**
* Configures compression using the Deflate algorithm with the given chunk length and crc check
* chance.
*
* @see #withCompression(String, int, double)
* @deprecated This method only exists for backward compatibility. Will not work with Apache
* Cassandra 5.0 or later due to removal of deprecated table properties (<a
* href="https://issues.apache.org/jira/browse/CASSANDRA-18742">CASSANDRA-18742</a>). Use
* {@link #withDeflateCompression(int)} instead.
*/
@Deprecated
@NonNull
@CheckReturnValue
default SelfT withDeflateCompression(int chunkLengthKB, double crcCheckChance) {
return withCompression("DeflateCompressor", chunkLengthKB, crcCheckChance);
}

/**
* Configures compression using the Deflate algorithm with the given chunk length.
*
* @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
default SelfT withDeflateCompression(int chunkLengthKB) {
return withCompression("DeflateCompressor", chunkLengthKB);
}

/**
* Configures compression using the Deflate algorithm using the default configuration (64kb
- * chunk_length, and 1.0 crc_check_chance).
+ * chunk_length).
*
- * @see #withCompression(String, int, double)
+ * @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
@@ -170,13 +239,13 @@ default SelfT withDeflateCompression() {

/**
* Configures compression using the given algorithm using the default configuration (64kb
- * chunk_length, and 1.0 crc_check_chance).
+ * chunk_length).
*
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
* #withDeflateCompression()}.
*
- * @see #withCompression(String, int, double)
+ * @see #withCompression(String, int)
*/
@NonNull
@CheckReturnValue
@@ -185,19 +254,32 @@ default SelfT withCompression(@NonNull String compressionAlgorithmName) {
}

/**
- * Configures compression using the given algorithm, chunk length and crc check chance.
+ * Configures compression using the given algorithm and chunk length.
*
* <p>Unless specifying a custom compression algorithm implementation, it is recommended to use
* {@link #withLZ4Compression()}, {@link #withSnappyCompression()}, or {@link
* #withDeflateCompression()}.
*
* @param compressionAlgorithmName The class name of the compression algorithm.
* @param chunkLengthKB The chunk length in KB of compression blocks. Defaults to 64.
- * @param crcCheckChance The probability (0.0 to 1.0) that checksum will be checked on each read.
- *     Defaults to 1.0.
*/
@NonNull
@CheckReturnValue
default SelfT withCompression(@NonNull String compressionAlgorithmName, int chunkLengthKB) {
return withOption(
"compression",
ImmutableMap.of("class", compressionAlgorithmName, "chunk_length_in_kb", chunkLengthKB));
}

/**
* @deprecated This method only exists for backward compatibility. Will not work with Apache
* Cassandra 5.0 or later due to removal of deprecated table properties (<a
* href="https://issues.apache.org/jira/browse/CASSANDRA-18742">CASSANDRA-18742</a>). Use
* {@link #withCompression(String, int)} instead.
*/
@NonNull
@CheckReturnValue
@Deprecated
default SelfT withCompression(
@NonNull String compressionAlgorithmName, int chunkLengthKB, double crcCheckChance) {
return withOption(
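To summarize the deprecations in this diff, here is a hedged migration sketch for existing callers. The class name CompressionMigrationExample is illustrative, and chaining withLZ4Compression(int) with withCRCCheckChance(double) in a single statement is an assumption based on the builder methods shown above, not something this PR's tests exercise together.

import static com.datastax.oss.driver.api.querybuilder.SchemaBuilder.createTable;

import com.datastax.oss.driver.api.core.type.DataTypes;

public class CompressionMigrationExample {
  public static void main(String[] args) {
    // Before: deprecated overload. Puts chunk_length_kb and crc_check_chance inside the
    // compression map, which Cassandra 5.0+ rejects (CASSANDRA-18742).
    @SuppressWarnings("deprecation")
    String before =
        createTable("relation_options")
            .withPartitionKey("id", DataTypes.INT)
            .withLZ4Compression(64, 1.0)
            .asCql();

    // After: chunk length only (rendered as chunk_length_in_kb), with the CRC check chance
    // moved to a top-level table option (supported since Cassandra 3.0).
    String after =
        createTable("relation_options")
            .withPartitionKey("id", DataTypes.INT)
            .withLZ4Compression(64)
            .withCRCCheckChance(1.0)
            .asCql();

    System.out.println(before);
    System.out.println(after);
  }
}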