From b9040205aeb63386659cbcf8f6cdb67953d4ce6e Mon Sep 17 00:00:00 2001 From: atovpeko Date: Mon, 28 Apr 2025 15:41:45 +0300 Subject: [PATCH 1/3] apply variable + cleanup --- ...vesync-configure-source-database-awsrds.md | 4 +-- .../_livesync-configure-source-database.md | 4 +-- _partials/_livesync-console.md | 26 +++++++------- _partials/_livesync-limitations.md | 2 +- _partials/_livesync-terminal.md | 22 ++++++------ .../_migrate_live_setup_enable_replication.md | 6 ++-- about/changelog.md | 6 ++-- migrate/index.md | 10 +++--- migrate/livesync-for-postgresql.md | 10 +++--- migrate/livesync-for-s3.md | 36 ++++++++++--------- 10 files changed, 66 insertions(+), 60 deletions(-) diff --git a/_partials/_livesync-configure-source-database-awsrds.md b/_partials/_livesync-configure-source-database-awsrds.md index 0cb2d35809..9dbefb57fc 100644 --- a/_partials/_livesync-configure-source-database-awsrds.md +++ b/_partials/_livesync-configure-source-database-awsrds.md @@ -31,7 +31,7 @@ Updating parameters on a PostgreSQL instance will cause an outage. Choose a time Changing parameters will cause an outage. Wait for the database instance to reboot before continuing. 1. Verify that the settings are live in your database. -1. **Create a user for livesync and assign permissions** +1. **Create a user for $LIVESYNC and assign permissions** 1. Create ``: @@ -63,7 +63,7 @@ Updating parameters on a PostgreSQL instance will cause an outage. Choose a time EOF ``` - If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing.: + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: ```sql psql $SOURCE < TO ; diff --git a/_partials/_livesync-configure-source-database.md b/_partials/_livesync-configure-source-database.md index 3ce73d7a4e..f4fc65ffef 100644 --- a/_partials/_livesync-configure-source-database.md +++ b/_partials/_livesync-configure-source-database.md @@ -15,7 +15,7 @@ import EnableReplication from "versionContent/_partials/_migrate_live_setup_enab This will require a restart of the PostgreSQL source database. -1. **Create a user for livesync and assign permissions** +1. **Create a user for $LIVESYNC and assign permissions** 1. Create ``: @@ -47,7 +47,7 @@ import EnableReplication from "versionContent/_partials/_migrate_live_setup_enab EOF ``` - If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing.: + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: ```sql psql $SOURCE < TO ; diff --git a/_partials/_livesync-console.md b/_partials/_livesync-console.md index d9ca0192ef..498ca60abc 100644 --- a/_partials/_livesync-console.md +++ b/_partials/_livesync-console.md @@ -11,12 +11,12 @@ import TuneSourceDatabaseAWSRDS from "versionContent/_partials/_livesync-configu - Ensure that the source $PG instance and the target $SERVICE_LONG have the same extensions installed. - LiveSync does not create extensions on the target. If the table uses column types from an extension, + $LIVESYNC_CAP does not create extensions on the target. If the table uses column types from an extension, first create the extension on the target $SERVICE_LONG before syncing the table. ## Limitations -* Indexes(including Primary Key and Unique constraints) are not migrated by $SERVICE_LONG. 
+* Indexes (including Primary Key and Unique constraints) are not migrated by $SERVICE_LONG. We recommend that you create only necessary indexes on the target $SERVICE_LONG depending on your query patterns. @@ -74,41 +74,41 @@ To sync data from your PostgreSQL database to your $SERVICE_LONG using $CONSOLE: 1. **Connect to your $SERVICE_LONG** In [$CONSOLE][portal-ops-mode], select the service to sync live data to. -1. **Start livesync** - 1. Click `Actions` > `livesync for PostgreSQL`. +1. **Start $LIVESYNC** + 1. Click `Actions` > `Livesync for PostgreSQL`. 1. **Connect the source database and target $SERVICE_SHORT** ![Livesync wizard](https://assets.timescale.com/docs/images/livesync-wizard.png) - In `livesync for PostgreSQL`: + In `Livesync for PostgreSQL`: 1. Set the `Livesync Name`. 1. Set the` PostgreSQL Connection String` to point to the source database you want to sync to Timescale. This is the connection string for [``][livesync-tune-source-db]. - 1. Press `Continue`. + 1. Click `Continue`. $CONSOLE connects to the source database and retrieves the schema information. 1. **Optimize the data to synchronize in hypertables** ![livesync start](https://assets.timescale.com/docs/images/livesync-start.png) 1. Select the table to sync, and press `+`. - $CONSOLE checks the table schema and, if possible suggests the column to use as the time dimension in a hypertable. + $CONSOLE checks the table schema and, if possible, suggests the column to use as the time dimension in a hypertable. 1. Repeat this step for each table you want to sync. - 1. Press `Start Livesync`. + 1. Click `Start Livesync`. - $CONSOLE starts livesync between the source database and the target $SERVICE_SHORT and displays the progress. + $CONSOLE starts $LIVESYNC between the source database and the target $SERVICE_SHORT and displays the progress. 1. **Monitor syncronization** - 1. To view the progress of the livesync, click the name of the livesync process: + 1. To view the progress of the $LIVESYNC, click the name of the $LIVESYNC process: ![livesync view status](https://assets.timescale.com/docs/images/livesync-view-status.png) - 1. To pause and restart livesync, click the buttons on the right of the livesync process and select an action: + 1. To pause and restart $LIVESYNC, click the buttons on the right of the $LIVESYNC process and select an action: ![livesync start stop](https://assets.timescale.com/docs/images/livesync-start-stop.png) -And that is it, you are using Livesync to synchronize all the data, or specific tables, from a PostgreSQL database -instance to your $SERVICE_LONG in real-time. +And that is it, you are using $LIVESYNC to synchronize all the data, or specific tables, from a PostgreSQL database +instance to your $SERVICE_LONG in real time. [install-psql]: /integrations/:currentVersion:/psql/ [portal-ops-mode]: https://console.cloud.timescale.com/dashboard/services diff --git a/_partials/_livesync-limitations.md b/_partials/_livesync-limitations.md index 69949ec0c0..dfbeaed8e7 100644 --- a/_partials/_livesync-limitations.md +++ b/_partials/_livesync-limitations.md @@ -4,7 +4,7 @@ the same changes to the source PostgreSQL instance. * Ensure that the source $PG instance and the target $SERVICE_LONG have the same extensions installed. - LiveSync does not create extensions on the target. If the table uses column types from an extension, + $LIVESYNC_CAP does not create extensions on the target. 
If the table uses column types from an extension, first create the extension on the target $SERVICE_LONG before syncing the table. * There is WAL volume growth on the source PostgreSQL instance during large table copy. * This works for PostgreSQL databases only as source. TimescaleDB is not yet supported. diff --git a/_partials/_livesync-terminal.md b/_partials/_livesync-terminal.md index d133cd86f1..c298988d45 100644 --- a/_partials/_livesync-terminal.md +++ b/_partials/_livesync-terminal.md @@ -10,12 +10,12 @@ import TuneSourceDatabaseAWSRDS from "versionContent/_partials/_migrate_live_tun - Ensure that the source $PG instance and the target $SERVICE_LONG have the same extensions installed. - LiveSync does not create extensions on the target. If the table uses column types from an extension, + $LIVESYNC_CAP does not create extensions on the target. If the table uses column types from an extension, first create the extension on the target $SERVICE_LONG before syncing the table. - [Install Docker][install-docker] on your sync machine. - You need a minimum of a 4 CPU/16GB EC2 instance to run Livesync. + You need a minimum of a 4 CPU/16GB EC2 instance to run $LIVESYNC. - Install the [PostgreSQL client tools][install-psql] on your sync machine. @@ -26,7 +26,7 @@ import TuneSourceDatabaseAWSRDS from "versionContent/_partials/_migrate_live_tun -- The Schema is not migrated by Livesync, you use pg_dump/restore to migrate schema +- The Schema is not migrated by $LIVESYNC, you use pg_dump/restore to migrate schema ## Set your connection strings @@ -129,14 +129,14 @@ events data, and tables that are already partitioned using PostgreSQL declarativ ## Synchronize data to your $SERVICE_LONG -You use the Livesync docker image to synchronize changes in real-time from a PostgreSQL database +You use the $LIVESYNC docker image to synchronize changes in real-time from a PostgreSQL database instance to a $SERVICE_LONG: -1. **Start Livesync** +1. **Start $LIVESYNC** - As you run Livesync continuously, best practice is to run it as a background process. + As you run $LIVESYNC continuously, best practice is to run it as a background process. ```shell docker run -d --rm --name livesync timescale/live-sync:v0.1.11 run --publication analytics --subscription livesync --source $SOURCE --target $TARGET @@ -144,7 +144,7 @@ instance to a $SERVICE_LONG: 1. **Trace progress** - Once Livesync is running as a docker daemon, you can also capture the logs: + Once $LIVESYNC is running as a docker daemon, you can also capture the logs: ```shell docker logs -f livesync ``` @@ -168,7 +168,7 @@ instance to a $SERVICE_LONG: - r: table is ready, synching live changes -1. **Stop Livesync** +1. **Stop $LIVESYNC** ```shell docker stop live-sync @@ -191,9 +191,9 @@ instance to a $SERVICE_LONG: ## Specify the tables to synchronize -After the Livesync docker is up and running, you [`CREATE PUBLICATION`][create-publication] on the SOURCE database to +After the $LIVESYNC docker is up and running, you [`CREATE PUBLICATION`][create-publication] on the SOURCE database to specify the list of tables which you intend to synchronize. Once you create a PUBLICATION, it is -automatically picked by Livesync, which starts synching the tables expressed as part of it. +automatically picked by $LIVESYNC, which starts syncing the tables expressed as part of it. For example: @@ -223,7 +223,7 @@ For example: ALTER PUBLICATION analytics SET(publish_via_partition_root=true); ``` -1. 
**Stop synching a table in the `PUBLICATION` with a call to `DROP TABLE`** +1. **Stop syncing a table in the `PUBLICATION` with a call to `DROP TABLE`** ```sql ALTER PUBLICATION analytics DROP TABLE tags; diff --git a/_partials/_migrate_live_setup_enable_replication.md b/_partials/_migrate_live_setup_enable_replication.md index 73e6b0b717..b402257d17 100644 --- a/_partials/_migrate_live_setup_enable_replication.md +++ b/_partials/_migrate_live_setup_enable_replication.md @@ -1,9 +1,9 @@ Replica identity assists data replication by identifying the rows being modified. Your options are that each table and hypertable in the source database should either have: -- **A primary key**: Data replication defaults to the primary key of the table being replicated. +- **A primary key**: data replication defaults to the primary key of the table being replicated. Nothing to do. - **A viable unique index**: each table has a unique, non-partial, non-deferrable index that includes only columns - marked as `NOT NULL`. If a UNIQUE index does not exists, create one to assist the migration. You can delete if after + marked as `NOT NULL`. If a UNIQUE index does not exist, create one to assist the migration. You can delete if after migration. For each table, set `REPLICA IDENTITY` to the viable unique index: @@ -19,4 +19,4 @@ ``` For each `UPDATE` or `DELETE` statement, PostgreSQL reads the whole table to find all matching rows. This results in significantly slower replication. If you are expecting a large number of `UPDATE` or `DELETE` operations on the table, - best practice is to not use `FULL` + best practice is to not use `FULL`. diff --git a/about/changelog.md b/about/changelog.md index d3f3122e4f..5e5b83685d 100644 --- a/about/changelog.md +++ b/about/changelog.md @@ -15,7 +15,7 @@ All the latest features and updates to Timescale products. [Livesync for S3](https://docs.timescale.com/migrate/latest/livesync-for-s3/) is our second livesync offering in Timescale Console, following livesync for PostgreSQL. This feature helps users sync data in their S3 buckets to a -Timescale Cloud service, and simplifies data importing. Livesync handles both existing and new data in real-time, +Timescale Cloud service, and simplifies data importing. Livesync handles both existing and new data in real time, automatically syncing everything into a Timescale Cloud service. Users can integrate Timescale Cloud alongside S3, where S3 stores data in raw form as the source for multiple destinations. @@ -133,7 +133,7 @@ To see the job information page, in [$CONSOLE][console], select the $SERVICE_SHO ## 🤩 In-Console Livesync for PostgreSQL -You can now set up an active data ingestion pipeline with Livesync for PostgreSQL in Timescale Console. This tool enables you to replicate your source database tables into Timescale's hypertables indefinitely. Yes, you heard that right—keep Livesync running for as long as you need, ensuring that your existing source PostgreSQL tables stay in sync with Timescale Cloud. Read more about setting up and using [Livesync for PostgreSQL](https://docs.timescale.com/migrate/latest/livesync-for-postgresql/). +You can now set up an active data ingestion pipeline with livesync for PostgreSQL in Timescale Console. This tool enables you to replicate your source database tables into Timescale's hypertables indefinitely. Yes, you heard that right—keep livesync running for as long as you need, ensuring that your existing source PostgreSQL tables stay in sync with Timescale Cloud. 
Read more about setting up and using [Livesync for PostgreSQL](https://docs.timescale.com/migrate/latest/livesync-for-postgresql/). ![Livesync in Timescale Console](https://assets.timescale.com/docs/images/timescale-cloud-livesync-tile.png) @@ -350,7 +350,7 @@ We have built a new solution that helps you continuously replicate all or some o [Livesync](https://docs.timescale.com/migrate/latest/livesync-for-postgresql/) allows you to keep a current Postgres instance such as RDS as your primary database, and easily offload your real-time analytical queries to Timescale Cloud to boost their performance. If you have any questions or feedback, talk to us in [#livesync in Timescale Community](https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88). -This is just the beginning—you'll see more from Livesync in 2025! +This is just the beginning—you'll see more from livesync in 2025! ## In-Console import from S3, I/O Boost, and Jobs Explorer diff --git a/migrate/index.md b/migrate/index.md index dfe76d523e..e8ab82d04d 100644 --- a/migrate/index.md +++ b/migrate/index.md @@ -36,14 +36,14 @@ see [Ingest data from other sources][data-ingest]. ## Livesync your data -You use $LIVESYNC to synchronize all or some of your data to your $SERVICE_LONG in real-time. You run $LIVESYNC +You use $LIVESYNC to synchronize all or some of your data to your $SERVICE_LONG in real time. You run $LIVESYNC continuously, using your data as a primary database and your $SERVICE_LONG as a logical replica. This enables you to leverage $CLOUD_LONG’s real-time analytics capabilities on your replica data. -| $LIVESYNC options | Downtime requirements | -|----------------------------------------|-----------------------| -| [$LIVESYNC for $PG][livesync-postgres] | None | -| [$LIVESYNC for S3][livesync-s3] | None | +| $LIVESYNC_CAP options | Downtime requirements | +|--------------------------------------------|-----------------------| +| [$LIVESYNC_CAP for $PG][livesync-postgres] | None | +| [$LIVESYNC_CAP for S3][livesync-s3] | None | diff --git a/migrate/livesync-for-postgresql.md b/migrate/livesync-for-postgresql.md index 4135b81ab0..75106906bf 100644 --- a/migrate/livesync-for-postgresql.md +++ b/migrate/livesync-for-postgresql.md @@ -15,7 +15,7 @@ import EarlyAccessNoRelease from "versionContent/_partials/_early_access.mdx"; # Livesync from PostgreSQL to Timescale Cloud You use $LIVESYNC to synchronize all the data, or specific tables, from a PostgreSQL database instance to your -$SERVICE_LONG in real-time. You run $LIVESYNC continuously, turning PostgreSQL into a primary database with your +$SERVICE_LONG in real time. You run $LIVESYNC continuously, turning PostgreSQL into a primary database with your $SERVICE_LONG as a logical replica. This enables you to leverage $CLOUD_LONG’s real-time analytics capabilities on your replica data. @@ -25,7 +25,7 @@ $LIVESYNC_CAP leverages the well-established PostgreSQL logical replication prot $LIVESYNC ensures compatibility, familiarity, and a broader knowledge base. Making it easier for you to adopt $LIVESYNC and integrate your data. -You use $LIVESYNC for data synchronization, rather than migration. Livesync can: +You use $LIVESYNC for data synchronization, rather than migration. * Copy existing data from a PostgreSQL instance to a $SERVICE_LONG: - Copy data at up to 150 GB/hr. @@ -38,14 +38,16 @@ You use $LIVESYNC for data synchronization, rather than migration. Livesync can: $LIVESYNC_CAP disables foreign key validation during the sync. 
For example, if a `metrics` table refers to the `id` column on the `tags` table, you can still sync only the `metrics` table without worrying about their foreign key relationships. - - Track progress. PostgreSQL exposes `COPY` progress under `pg_stat_progress_copy`. + - Track progress. + + PostgreSQL exposes `COPY` progress under `pg_stat_progress_copy`. * Synchronize real-time changes from a PostgreSQL instance to a $SERVICE_LONG. * Add and remove tables on demand using the [PostgreSQL PUBLICATION interface][postgres-publication-interface]. * Enable features such as [hypertables][about-hypertables], [columnstore][compression], and [continuous aggregates][caggs] on your logical replica. -: livesync is not supported for production use. If you have an questions or feedback, talk to us in #livesync in Timescale Community. +: livesync is not supported for production use. If you have any questions or feedback, talk to us in #livesync in Timescale Community. diff --git a/migrate/livesync-for-s3.md b/migrate/livesync-for-s3.md index 81a39d2e74..5fa845b2b0 100644 --- a/migrate/livesync-for-s3.md +++ b/migrate/livesync-for-s3.md @@ -18,26 +18,26 @@ your replica data. ![$LIVESYNC_CAP view status](https://assets.timescale.com/docs/images/livesync-s3-view-status.png) -You use $LIVESYNC for data synchronization, rather than migration. Livesync can: +You use $LIVESYNC for data synchronization, rather than migration: * Sync data from an S3 bucket instance to a $SERVICE_LONG: - - $LIVESYNC uses Glob patterns to identify the objects to sync. - - $LIVESYNC uses the objects returned for subsequent queries. This efficient approach means files are synced in + - $LIVESYNC_CAP uses Glob patterns to identify the objects to sync. + - $LIVESYNC_CAP uses the objects returned for subsequent queries. This efficient approach means files are synced in [lexicographical order][lex-order]. - - $LIVESYNC watches an S3 bucket for new files and imports them automatically. $LIVESYNC runs on a configurable + - $LIVESYNC_CAP watches an S3 bucket for new files and imports them automatically. $LIVESYNC_CAP runs on a configurable schedule and tracks processed files. - For large backlogs, $LIVESYNC checks every minute until caught up. * Sync data from multiple file formats: - * CSV: checked for compression in `.gz` and `.zip` format, then processing using [timescaledb-parallel-copy][parallel-copy] + * CSV: checked for compression in `.gz` and `.zip` format, then processing using [timescaledb-parallel-copy][parallel-copy]. - * Parquet: converted to CSV, then processed using [timescaledb-parallel-copy][parallel-copy] + * Parquet: converted to CSV, then processed using [timescaledb-parallel-copy][parallel-copy]. * Enable features such as [hypertables][about-hypertables], [columnstore][compression], and [continuous aggregates][caggs] on your logical replica. -$LIVESYNC for S3 continuously imports data from an Amazon S3 bucket into your database. It monitors your S3 bucket for new +$LIVESYNC_CAP for S3 continuously imports data from an Amazon S3 bucket into your database. It monitors your S3 bucket for new files matching a specified pattern and automatically imports them into your designated database table. : livesync is not supported for production use. If you have any questions or feedback, talk to us in #livesync in Timescale Community. 
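Because files are picked up in lexicographical order, key layouts that embed the date (for example `data/2025/04/28/metrics-0001.csv`) are synced in chronological order. A quick way to preview which objects a prefix will match, and the order they will be processed in, is the AWS CLI. This is only a sketch: the bucket, prefix, and file names below are hypothetical placeholders, and it assumes the CLI is configured with credentials that can read the bucket.

```shell
# Sketch: list the object keys under a hypothetical prefix in the
# lexicographical order they would be processed (bucket and prefix are placeholders).
aws s3 ls s3://my-bucket/data/2025/ --recursive \
  | awk '{print $4}' \
  | sort \
  | head -n 20
```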
@@ -70,19 +70,21 @@ files matching a specified pattern and automatically imports them into your desi - **CSV**: - Maximum file size: 1GB - To increase these limits, contact sales@timescale.com + + To increase this limit, contact sales@timescale.com - Maximum row size: 2MB - Supported compressed formats: - `.gz` - `.zip` - Advanced settings: - Delimiter: the default character is `,`, you can choose a different delimiter - - Skip Header: skip the first row if your file has headers + - Skip header: skip the first row if your file has headers - **Parquet**: - Maximum file size: 1GB - Maximum row group uncompressed size: 200MB - Maximum row size: 2MB - **Sync iteration**: + To prevent system overload, $LIVESYNC tracks up to 100 files for each sync iteration. Additional checks only fill empty queue slots. @@ -96,8 +98,8 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE: In [$CONSOLE][portal-ops-mode], select the service to sync live data to. 1. **Start livesync** - 1. Click `Actions` > `livesync for S3`. - 2. Click `New Livesync for S3` + 1. Click `Actions` > `Livesync for S3`. + 2. Click `New Livesync for S3`. 1. **Connect the source S3 bucket to the target $SERVICE_SHORT** @@ -115,7 +117,7 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE: - `/**`: match all recursively. - `/**/*.csv`: match a specific file type. - $LIVESYNC uses prefix filters where possible, place patterns carefully at the end of your glob expression. + $LIVESYNC_CAP uses prefix filters where possible, place patterns carefully at the end of your glob expression. AWS S3 doesn't support complex filtering. If your expression filters too many files, the list operation may timeout. 1. Click the search icon, you see files to sync. Click `Continue`. @@ -128,17 +130,19 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE: ![Livesync choose table](https://assets.timescale.com/docs/images/livesync-s3-create-tables.png) 1. Choose the `Data type` for each column, then click `Continue`. - 1. Choose the interval. This can be a minute, an hour or use a [cron expression][cron-expression]. + 1. Choose the interval. This can be a minute, an hour, or use a [cron expression][cron-expression]. 1. Repeat this step for each table you want to sync. - 1. Press `Start Livesync`. + 1. Click `Start Livesync`. $CONSOLE starts $LIVESYNC between the source database and the target $SERVICE_SHORT and displays the progress. 1. **Monitor syncronization** - 1. To view the progress of the livesync, click the name of the $LIVESYNC process: + 1. To view the progress of the $LIVESYNC, click the name of the $LIVESYNC process. + You see the status of the file being synced. Only one file runs at a time. ![livesync view status](https://assets.timescale.com/docs/images/livesync-s3-view-status.png) - 1. To pause and restart livesync, click the buttons on the right of the $LIVESYNC process and select an action: + 1. To pause and restart $LIVESYNC, click the buttons on the right of the $LIVESYNC process and select an action. + During pauses, you can edit the configuration before resuming. 
![livesync start stop](https://assets.timescale.com/docs/images/livesync-s3-start-stop.png) From f195127d0a2558155d72bd1115b5fb77c50300d7 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Tue, 29 Apr 2025 10:58:06 +0300 Subject: [PATCH 2/3] more little fixes --- _partials/_livesync-console.md | 11 ++++---- _partials/_livesync-terminal.md | 4 +-- _partials/_migrate_import_prerequisites.md | 2 +- ...igrate_live_tune_source_database_awsrds.md | 8 +++--- _partials/_migrate_prerequisites.md | 2 +- migrate/livesync-for-postgresql.md | 2 +- migrate/livesync-for-s3.md | 27 ++++++++++--------- 7 files changed, 29 insertions(+), 27 deletions(-) diff --git a/_partials/_livesync-console.md b/_partials/_livesync-console.md index 498ca60abc..512c734be9 100644 --- a/_partials/_livesync-console.md +++ b/_partials/_livesync-console.md @@ -18,9 +18,9 @@ import TuneSourceDatabaseAWSRDS from "versionContent/_partials/_livesync-configu * Indexes (including Primary Key and Unique constraints) are not migrated by $SERVICE_LONG. - We recommend that you create only necessary indexes on the target $SERVICE_LONG depending on your query patterns. + We recommend that you create only the necessary indexes on the target $SERVICE_LONG depending on your query patterns. -* Tables with user defined types are not migrated by $SERVICE_LONG. +* Tables with user-defined types are not migrated by $SERVICE_LONG. You need to create the user defined types on the target $SERVICE_LONG before syncing the table. @@ -81,9 +81,9 @@ To sync data from your PostgreSQL database to your $SERVICE_LONG using $CONSOLE: ![Livesync wizard](https://assets.timescale.com/docs/images/livesync-wizard.png) - In `Livesync for PostgreSQL`: + In `Livesync for Postgre`: 1. Set the `Livesync Name`. - 1. Set the` PostgreSQL Connection String` to point to the source database you want to sync to Timescale. + 1. Set the `PostgreSQL Connection String` to point to the source database you want to sync to Timescale. This is the connection string for [``][livesync-tune-source-db]. 1. Click `Continue`. @@ -92,7 +92,8 @@ To sync data from your PostgreSQL database to your $SERVICE_LONG using $CONSOLE: 1. **Optimize the data to synchronize in hypertables** ![livesync start](https://assets.timescale.com/docs/images/livesync-start.png) - 1. Select the table to sync, and press `+`. + 1. Select the table to sync and click `+`. + $CONSOLE checks the table schema and, if possible, suggests the column to use as the time dimension in a hypertable. 1. Repeat this step for each table you want to sync. 1. Click `Start Livesync`. diff --git a/_partials/_livesync-terminal.md b/_partials/_livesync-terminal.md index c298988d45..3596a824e6 100644 --- a/_partials/_livesync-terminal.md +++ b/_partials/_livesync-terminal.md @@ -26,7 +26,7 @@ import TuneSourceDatabaseAWSRDS from "versionContent/_partials/_migrate_live_tun -- The Schema is not migrated by $LIVESYNC, you use pg_dump/restore to migrate schema +- The schema is not migrated by $LIVESYNC, you use `pg_dump`/`pg_restore` to migrate it. 
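As a minimal sketch of that step, assuming the `$SOURCE` and `$TARGET` connection strings defined in the next section, a schema-only dump and restore looks roughly like this. The exact command this guide uses is shown in the schema migration step further down; treat the flags here as illustrative rather than prescriptive.

```shell
# Sketch only: copy table definitions, not data, from the source to the target.
# $SOURCE and $TARGET are the connection strings set in the next section.
pg_dump $SOURCE \
  --schema-only \
  --no-owner \
  --no-privileges \
  --file=schema.sql

psql $TARGET -f schema.sql
```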
## Set your connection strings @@ -61,7 +61,7 @@ The `` in the `SOURCE` connection must have the replication role granted i ## Migrate the table schema to the $SERVICE_LONG -Use pg_dump to: +Use `pg_dump` to: diff --git a/_partials/_migrate_import_prerequisites.md b/_partials/_migrate_import_prerequisites.md index 31f1a00185..e08c5daecc 100644 --- a/_partials/_migrate_import_prerequisites.md +++ b/_partials/_migrate_import_prerequisites.md @@ -10,7 +10,7 @@ Before you migrate your data: Each $SERVICE_LONG has a single database that supports the [most popular extensions][all-available-extensions]. $SERVICE_LONGs do not support tablespaces, and there is no superuser associated with a $SERVICE_SHORT. - Best practice is to create a $SERVICE_LONGs with at least 8 CPUs for a smoother experience. A higher-spec instance + Best practice is to create a $SERVICE_LONG with at least 8 CPUs for a smoother experience. A higher-spec instance can significantly reduce the overall migration window. - To ensure that maintenance does not run during the process, [adjust the maintenance window][adjust-maintenance-window]. diff --git a/_partials/_migrate_live_tune_source_database_awsrds.md b/_partials/_migrate_live_tune_source_database_awsrds.md index 118885fe04..cd54ee0b88 100644 --- a/_partials/_migrate_live_tune_source_database_awsrds.md +++ b/_partials/_migrate_live_tune_source_database_awsrds.md @@ -7,7 +7,7 @@ Updating parameters on a PostgreSQL instance will cause an outage. Choose a time 1. In [https://console.aws.amazon.com/rds/home#databases:][databases], select the RDS instance to migrate. - 1. Click `Configuration`, scroll down and note the `DB instance parameter group`, then click `Parameter Groups` + 1. Click `Configuration`, scroll down and note the `DB instance parameter group`, then click `Parameter groups` : livesync is not supported for production use. If you have any questions or feedback, talk to us in #livesync in Timescale Community. @@ -41,9 +41,10 @@ $LIVESYNC_CAP for S3 continuously imports data from an Amazon S3 bucket into you -- Access to a standard Amazon S3 bucket containing your data files. +- Ensure access to a standard Amazon S3 bucket containing your data files. + Directory buckets are not supported. -- Access credentials for the S3 bucket. +- Configure access credentials for the S3 bucket. - The following credentials are supported: - [IAM Role][credentials-iam]. @@ -64,10 +65,10 @@ $LIVESYNC_CAP for S3 continuously imports data from an Amazon S3 bucket into you ## Limitations - **CSV**: - - Maximum file size: 1GB + - Maximum file size: 1 GB To increase this limit, contact sales@timescale.com - - Maximum row size: 2MB + - Maximum row size: 2 MB - Supported compressed formats: - `.gz` - `.zip` @@ -75,9 +76,9 @@ $LIVESYNC_CAP for S3 continuously imports data from an Amazon S3 bucket into you - Delimiter: the default character is `,`, you can choose a different delimiter - Skip header: skip the first row if your file has headers - **Parquet**: - - Maximum file size: 1GB - - Maximum row group uncompressed size: 200MB - - Maximum row size: 2MB + - Maximum file size: 1 GB + - Maximum row group uncompressed size: 200 MB + - Maximum row size: 2 MB - **Sync iteration**: To prevent system overload, $LIVESYNC tracks up to 100 files for each sync iteration. Additional checks only fill @@ -92,9 +93,9 @@ To sync data from your S3 bucket to your $SERVICE_LONG using $CONSOLE: 1. **Connect to your $SERVICE_LONG** In [$CONSOLE][portal-ops-mode], select the service to sync live data to. -1. 
**Start livesync** +1. **Start $LIVESYNC** 1. Click `Actions` > `Livesync for S3`. - 2. Click `New Livesync for S3`. + 2. Click `New livesync for S3`. 1. **Connect the source S3 bucket to the target $SERVICE_SHORT** From 55313d26bea5ae176a7f20b86f0f2e25b964e9c3 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Tue, 29 Apr 2025 11:00:24 +0300 Subject: [PATCH 3/3] typo --- _partials/_livesync-console.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_partials/_livesync-console.md b/_partials/_livesync-console.md index 512c734be9..e15f9b37b6 100644 --- a/_partials/_livesync-console.md +++ b/_partials/_livesync-console.md @@ -81,7 +81,7 @@ To sync data from your PostgreSQL database to your $SERVICE_LONG using $CONSOLE: ![Livesync wizard](https://assets.timescale.com/docs/images/livesync-wizard.png) - In `Livesync for Postgre`: + In `Livesync for Postgres`: 1. Set the `Livesync Name`. 1. Set the `PostgreSQL Connection String` to point to the source database you want to sync to Timescale.
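For reference, the `PostgreSQL Connection String` is a standard libpq URI for the source database, shown here in the `$SOURCE` export form the terminal guide uses. The host, user, password, and database name below are placeholders only; substitute your own values.

```shell
# Hypothetical example of a source connection string; substitute your own values.
# sslmode=require is a sensible default when the source is reached over the public internet.
export SOURCE="postgres://livesync_user:mypassword@source-host.example.com:5432/postgres?sslmode=require"
```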