fix(errors): Support writer options in the rust consumer #6788

Open · wants to merge 6 commits into master
2 changes: 2 additions & 0 deletions rust_snuba/benches/processors.rs
@@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime};

@@ -62,6 +63,7 @@ fn create_factory(
python_class_name: python_class_name.into(),
python_module: "test".into(),
},
writer_options: HashMap::new(),
};

let processing_concurrency =
1 change: 1 addition & 0 deletions rust_snuba/src/config.rs
@@ -76,6 +76,7 @@ pub struct StorageConfig {
pub clickhouse_table_name: String,
pub clickhouse_cluster: ClickhouseConfig,
pub message_processor: MessageProcessorConfig,
pub writer_options: HashMap<String, String>,
}

#[derive(Deserialize, Clone, Debug)]
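
For context on how this field is populated: the structs in config.rs are deserialized with serde (note the `#[derive(Deserialize, ...)]` attributes in this file), so `writer_options` is read straight out of the consumer config JSON that the Python side assembles. A minimal, self-contained sketch of that behaviour — the struct is trimmed down and the JSON fragment is hypothetical, not the full Snuba config:

use std::collections::HashMap;
use serde::Deserialize;

// Trimmed-down stand-in for StorageConfig; the real struct also carries
// the cluster, table, and message-processor configuration.
#[derive(Deserialize, Debug)]
pub struct StorageConfig {
    pub clickhouse_table_name: String,
    pub writer_options: HashMap<String, String>,
}

fn main() {
    // Hypothetical fragment of the consumer config as Python might emit it.
    let raw = r#"{
        "clickhouse_table_name": "errors_local",
        "writer_options": {"input_format_skip_unknown_fields": "1"}
    }"#;
    let config: StorageConfig = serde_json::from_str(raw).unwrap();
    assert_eq!(config.writer_options["input_format_skip_unknown_fields"], "1");
}

Without a `#[serde(default)]` on the field, a config that omits `writer_options` would fail to deserialize, which is presumably why the Python side always emits the key and falls back to an empty dict (see consumer_config.py below).
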
3 changes: 2 additions & 1 deletion rust_snuba/src/consumer.rs
@@ -107,14 +107,15 @@ pub fn consumer_impl(

for storage in &consumer_config.storages {
tracing::info!(
"Storage: {}, ClickHouse Table Name: {}, Message Processor: {:?}, ClickHouse host: {}, ClickHouse port: {}, ClickHouse HTTP port: {}, ClickHouse database: {}",
"Storage: {}, ClickHouse Table Name: {}, Message Processor: {:?}, ClickHouse host: {}, ClickHouse port: {}, ClickHouse HTTP port: {}, ClickHouse database: {}, ClickHouse writer options: {:?}",
storage.name,
storage.clickhouse_table_name,
&storage.message_processor,
storage.clickhouse_cluster.host,
storage.clickhouse_cluster.port,
storage.clickhouse_cluster.http_port,
storage.clickhouse_cluster.database,
storage.writer_options,
);
}

1 change: 1 addition & 0 deletions rust_snuba/src/factory.rs
@@ -117,6 +117,7 @@ impl ProcessingStrategyFactory<KafkaPayload> for ConsumerStrategyFactory {
&self.storage_config.clickhouse_cluster.password,
self.async_inserts,
self.batch_write_timeout,
&self.storage_config.writer_options,
);

let accumulator = Arc::new(
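
This is the hand-off point: the options resolved from the storage config are threaded into `BatchFactory::new` together with the cluster credentials, and batch.rs below turns them into query parameters.
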
53 changes: 53 additions & 0 deletions rust_snuba/src/strategies/clickhouse/batch.rs
@@ -1,3 +1,5 @@
use std::collections::HashMap;

use reqwest::header::{HeaderMap, HeaderValue, ACCEPT_ENCODING, CONNECTION};
use reqwest::{Client, ClientBuilder};
use sentry_arroyo::gauge;
@@ -43,6 +45,7 @@ impl BatchFactory {
clickhouse_password: &str,
async_inserts: bool,
batch_write_timeout: Option<Duration>,
writer_options: &HashMap<String, String>,
) -> Self {
let mut headers = HeaderMap::with_capacity(5);
headers.insert(CONNECTION, HeaderValue::from_static("keep-alive"));
@@ -69,6 +72,9 @@
query_params.push_str("&async_insert=1&wait_for_async_insert=1");
}
}
for (key, value) in writer_options {
query_params.push_str(&format!("&{key}={value}"));
}

let url = format!("http://{hostname}:{http_port}?{query_params}");
let query = format!("INSERT INTO {table} FORMAT JSONEachRow");
@@ -268,6 +274,7 @@ mod tests {
"",
false,
None,
&HashMap::new(),
);

let mut batch = factory.new_batch();
@@ -303,6 +310,48 @@
"",
true,
None,
&HashMap::new(),
);

let mut batch = factory.new_batch();

batch
.write_rows(&RowData::from_encoded_rows(vec![
br#"{"hello": "world"}"#.to_vec()
]))
.unwrap();

concurrency.handle().block_on(batch.finish()).unwrap();

mock.assert();
}

#[test]
fn test_write_skip_unknown_fields() {
crate::testutils::initialize_python();
let server = MockServer::start();
let mock = server.mock(|when, then| {
when.method(POST)
.query_param("input_format_skip_unknown_fields", "1");
then.status(200).body("hi");
});

let concurrency = ConcurrencyConfig::new(1);
let writer_options = HashMap::from([(
"input_format_skip_unknown_fields".to_string(),
"1".to_string(),
)]);
let factory = BatchFactory::new(
&server.host(),
server.port(),
"testtable",
"testdb",
&concurrency,
"default",
"",
true,
None,
&writer_options,
);

let mut batch = factory.new_batch();
@@ -337,6 +386,7 @@ mod tests {
"",
false,
None,
&HashMap::new(),
);

let mut batch = factory.new_batch();
@@ -369,6 +419,7 @@
"",
false,
None,
&HashMap::new(),
);

let mut batch = factory.new_batch();
@@ -405,6 +456,7 @@
// pass in an unreasonably short timeout
// which prevents the client request from reaching Clickhouse
Some(Duration::from_millis(0)),
&HashMap::new(),
);

let mut batch = factory.new_batch();
@@ -439,6 +491,7 @@
true,
// pass in a reasonable timeout
Some(Duration::from_millis(1000)),
&HashMap::new(),
);

let mut batch = factory.new_batch();
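
The heart of the change is the small loop added to `BatchFactory::new`, which appends each writer option to the query string of the ClickHouse HTTP endpoint. A minimal sketch of that URL construction, with an illustrative base parameter string (the real factory also sets `database`, `load_balancing`, and the async-insert flags):

use std::collections::HashMap;

// Mirrors the loop added in BatchFactory::new: every writer option becomes
// one extra &key=value query parameter on the ClickHouse HTTP URL.
fn build_url(
    hostname: &str,
    http_port: u16,
    base_params: &str,
    writer_options: &HashMap<String, String>,
) -> String {
    let mut query_params = base_params.to_string();
    for (key, value) in writer_options {
        query_params.push_str(&format!("&{key}={value}"));
    }
    format!("http://{hostname}:{http_port}?{query_params}")
}

fn main() {
    let writer_options = HashMap::from([(
        "input_format_skip_unknown_fields".to_string(),
        "1".to_string(),
    )]);
    let url = build_url("localhost", 8123, "database=testdb", &writer_options);
    // Prints e.g. http://localhost:8123?database=testdb&input_format_skip_unknown_fields=1
    println!("{url}");
}

Note that keys and values are interpolated verbatim, so the options are implicitly assumed to be URL-safe, and with more than one entry the append order follows `HashMap` iteration order — both fine for ClickHouse settings, which are plain identifier=value pairs.
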
9 changes: 7 additions & 2 deletions snuba/consumers/consumer_config.py
@@ -33,6 +33,7 @@ class StorageConfig:
clickhouse_table_name: str
clickhouse_cluster: ClickhouseClusterConfig
message_processor: MessageProcessorConfig
writer_options: dict[str, str]


@dataclass(frozen=True)
@@ -171,7 +172,8 @@ def resolve_consumer_config(

validate_storages([*storages.values()])

stream_loader = storages[storage_names[0]].get_table_writer().get_stream_loader()
table_writer = storages[storage_names[0]].get_table_writer()
stream_loader = table_writer.get_stream_loader()
default_topic_spec = stream_loader.get_default_topic_spec()

resolved_raw_topic = _resolve_topic_config(
@@ -273,7 +275,9 @@ def resolve_storage_config(
database=cluster.get_database(),
)

processor = storage.get_table_writer().get_stream_loader().get_processor()
table_writer = storage.get_table_writer()
processor = table_writer.get_stream_loader().get_processor()
writer_options = table_writer.get_writer_options()

table_schema = storage.get_schema()
assert isinstance(table_schema, TableSchema)
@@ -285,6 +289,7 @@
python_class_name=processor.__class__.__name__,
python_module=processor.__class__.__module__,
),
writer_options=dict(writer_options or {}),
)


2 changes: 2 additions & 0 deletions snuba/datasets/configuration/events/storages/errors.yaml
@@ -393,6 +393,8 @@ replacer_processor:
contexts: []
state_name: errors
storage_key_str: errors
writer_options:
input_format_skip_unknown_fields: 1
stream_loader:
processor: ErrorsProcessor
default_topic: events
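
For reference, `input_format_skip_unknown_fields` is a standard ClickHouse format setting: when set to 1, an `INSERT ... FORMAT JSONEachRow` (the statement this writer builds, per batch.rs above) skips input fields that have no matching column in the target table instead of rejecting the batch. Declaring it per-storage here means only the errors writer opts into that behaviour.
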
3 changes: 3 additions & 0 deletions snuba/datasets/table_storage.py
@@ -343,6 +343,9 @@ def get_replacer_processor(self) -> Optional[ReplacerProcessor[Any]]:
"""
return self.__replacer_processor

def get_writer_options(self) -> ClickhouseWriterOptions:
return self.__writer_options

def __update_writer_options(
self,
options: ClickhouseWriterOptions = None,