# replay.yaml
# Optional - Custom identifier for this replay run
tag: ""
# Optional - Identifier for queries executed
source_tag: ""
# Directory location of extracted workload, relative to current directory
workload_location: ""
# Endpoint and username of target cluster to replay queries on
target_cluster_endpoint: "host:port/database"
target_cluster_region: ""
master_username: ""
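# A filled-in example of the target cluster block (the cluster name, endpoint
# hash, region, and username below are all hypothetical):
#   target_cluster_endpoint: "mycluster.abc123xyz789.us-east-1.redshift.amazonaws.com:5439/dev"
#   target_cluster_region: "us-east-1"
#   master_username: "awsuser"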
# NLB or NAT endpoint for Simple Replay to connect through. The NLB or NAT should have connectivity to target_cluster_endpoint
nlb_nat_dns: ""
# Required only for playback using ODBC (pyodbc)
odbc_driver: ""
# If the original driver isn't supported (e.g. JDBC), use this driver. "psql"
# and "odbc" are the only valid values.
default_interface: "psql"
# Optional - Leaving it empty defers to connections.json. "all on" preserves
# time between transactions. "all off" disregards time between transactions,
# executing them as a batch.
time_interval_between_transactions: "all on"
# Optional - Leaving it empty defers to connections.json. "all on" preserves
# time between queries. "all off" disregards time between queries, executing
# them as a batch.
time_interval_between_queries: "all on"
# Should COPY statements be executed?
execute_copy_statements: "false"
# Should UNLOAD statements be executed?
execute_unload_statements: "false"
# Optional - Where the UNLOADs and the system table unload go.
replay_output: ""
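# Example with a hypothetical bucket:
#   replay_output: "s3://my-replay-bucket/replay-output"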
# Optional - Where the analysis data and summary report will be uploaded. Example: s3://bucket_name/path
analysis_output: ""
# Optional - IAM role used to perform UNLOADs. Leaving this blank means UNLOADs will not be replayed.
unload_iam_role: ""
# Optional - IAM role used for analysis; it needs UNLOAD access. Leaving this blank means analysis will not be run.
analysis_iam_role: ""
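# Illustrative values (the account ID and role names below are hypothetical;
# IAM role ARNs follow the form arn:aws:iam::<account-id>:role/<role-name>):
#   unload_iam_role: "arn:aws:iam::123456789012:role/RedshiftUnloadRole"
#   analysis_iam_role: "arn:aws:iam::123456789012:role/RedshiftAnalysisRole"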
# Location of the SQL file containing queries to unload system tables
unload_system_table_queries: "unload_system_tables.sql"
# IAM role to UNLOAD system tables from the target cluster to an S3 location
# for later analysis
target_cluster_system_table_unload_iam_role: ""
# Include filters are combined as "db AND user AND pid"; exclude filters are
# combined as "db OR user OR pid". If a filter has multiple values, enclose
# each value in single quotes.
filters:
    include:
        database_name: ['*']
        username: ['*']
        pid: ['*']
    exclude:
        database_name: []
        username: []
        pid: []
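# For example, to replay only queries against two databases while skipping a
# hypothetical "etl_user" (the database and user names below are illustrative):
#   filters:
#       include:
#           database_name: ['tpch', 'tpcds']
#           username: ['*']
#           pid: ['*']
#       exclude:
#           database_name: []
#           username: ['etl_user']
#           pid: []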
##
## The settings below probably don't need to be modified for a typical run
##
# Set the logging verbosity
log_level: "INFO"
# Number of processes used to parallelize the work. If omitted or null, uses
# one fewer process than the number of CPUs.
num_workers: ~
# Output a warning if a connection does not start within this many seconds of
# its expected time.
connection_tolerance_sec: 300
# Number of Simple Replay log files to retain
backup_count: 1
# Should the returned data be discarded?
drop_return: true
# Should connections in the replay be throttled?
limit_concurrent_connections: ~
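# If throttling is desired, this is assumed to take an integer cap, e.g.:
#   limit_concurrent_connections: 50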
# Should multi-statement SQL be split?
split_multi: true
# For Serverless, set up a secret that stores the admin username and password,
# and specify the name of the secret below.
# Note: this admin username maps to the username specified as `master_username`
# in this file. The key will be renamed to `admin_username` in a future release.
secret_name: ""
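# Illustrative value (hypothetical secret name):
#   secret_name: "replay/serverless-admin-credentials"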