From 1c08ceb7f44f31dae3f14b6ac7e27c22059fad6e Mon Sep 17 00:00:00 2001 From: PXF Contiuous Integration Date: Fri, 7 Apr 2023 04:34:54 +0000 Subject: [PATCH 01/35] Bump version to 6.6.1-SNAPSHOT [skip ci] --- version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version b/version index 826f5ce030..1b77cc46ff 100644 --- a/version +++ b/version @@ -1 +1 @@ -6.6.0 +6.6.1-SNAPSHOT From 086c06c88bdd6b29cb847cf2620e6c1bcecdd0a5 Mon Sep 17 00:00:00 2001 From: Alexander Denissov Date: Wed, 12 Apr 2023 15:43:44 -0700 Subject: [PATCH 02/35] Updated locations of GP artifacts in CI pipelines (#966) --- .../pipelines/templates/build_pipeline-tpl.yml | 14 +++++++------- .../templates/dev_build_pipeline-tpl.yml | 16 ++++++++-------- .../pipelines/templates/pr_pipeline-tpl.yml | 12 ++++++------ 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/concourse/pipelines/templates/build_pipeline-tpl.yml b/concourse/pipelines/templates/build_pipeline-tpl.yml index 1182e10c72..e5775b519e 100644 --- a/concourse/pipelines/templates/build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/build_pipeline-tpl.yml @@ -471,7 +471,7 @@ resources: url: ((ud/pxf/secrets/ud-pipeline-bot-gp-releng-webhook)) {% endif %} -## ---------- Product Packages ---------- +## ---------- Greenplum Packages ---------- {% set gp_ver = 5 %} {% for i in range(num_gpdb5_versions) %} - name: gpdb[[gp_ver]]_rhel7_rpm_latest-[[i]] @@ -480,7 +480,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm {% endfor %} {# range(num_gpdb5_versions) #} {% set gp_ver = None %} @@ -492,7 +492,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: 
latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm - name: gpdb[[gp_ver]]_ubuntu18_deb_latest-[[i]] type: gcs @@ -500,7 +500,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-ubuntu18.04-amd64.deb + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-ubuntu18.04-amd64.deb {% endfor %} {# range(num_gpdb6_versions) #} {% set gp_ver = None %} @@ -512,7 +512,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel8-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel8-x86_64.rpm {% endfor %} {# range(num_gpdb6_versions) #} {% set gp_ver = None %} @@ -523,13 +523,13 @@ resources: source: bucket: ((ud/pxf/common/gpdb-concourse-resources-prod-bucket-name)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/greenplum-db-(7.*)-el8-x86_64.rpm + regexp: server/released/gpdb7/greenplum-db-(7.*)-el8-x86_64.rpm ## ---------- PXF 5 (for GPDB 6) Artifact --------------- - name: pxf5_gp6_rhel7_released type: gcs source: - bucket: ((ud/pxf/[[ environment ]]/releng-drop-bucket-name)) + bucket: ((ud/pxf/prod/releng-drop-bucket-name)) json_key: ((concourse-gcs-resources-service-account-key)) regexp: ((ud/pxf/common/releng-drop-path))/gpdb6/pxf-gp6-5.(.*)-2.el7.x86_64.rpm diff --git a/concourse/pipelines/templates/dev_build_pipeline-tpl.yml b/concourse/pipelines/templates/dev_build_pipeline-tpl.yml index e8c69b0015..489dc371b4 100644 --- a/concourse/pipelines/templates/dev_build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/dev_build_pipeline-tpl.yml @@ -257,7 +257,7 @@ resources: url: ((ud-pipeline-bot-[[user]]-webhook)) 
{% endif %} -## ---------- Product Packages ---------- +## ---------- Greenplum Packages ---------- {% set gp_ver = 5 %} - name: gpdb[[gp_ver]]_rhel7_rpm_latest-0 type: gcs @@ -265,7 +265,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm {% set gp_ver = None %} {% set gp_ver = 6 %} @@ -275,7 +275,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm - name: gpdb[[gp_ver]]_ubuntu18_deb_latest-0 type: gcs @@ -283,15 +283,15 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-(.*)-ubuntu18.04-amd64.deb + regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-ubuntu18.04-amd64.deb - name: gpdb[[gp_ver]]_rhel8_rpm_latest-0 type: gcs icon: google-drive source: - bucket: ((ud/pxf/common/gpdb-concourse-resources-prod-bucket-name)) - json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/gpdb6/greenplum-db-(6.*)-rhel8-x86_64.rpm + bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) + json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) + regexp: latest-0_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel8-x86_64.rpm {% set gp_ver = None %} ## ---------- Greenplum 7 Beta Builds ---------- @@ -301,7 +301,7 @@ resources: source: bucket: ((ud/pxf/common/gpdb-concourse-resources-prod-bucket-name)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/greenplum-db-(7.*)-el8-x86_64.rpm + regexp: 
server/released/gpdb7/greenplum-db-(7.*)-el8-x86_64.rpm ## ---------- PXF 5 Artifact --------------- {% if multinode %} diff --git a/concourse/pipelines/templates/pr_pipeline-tpl.yml b/concourse/pipelines/templates/pr_pipeline-tpl.yml index ce0e69389f..d4aff4d13e 100644 --- a/concourse/pipelines/templates/pr_pipeline-tpl.yml +++ b/concourse/pipelines/templates/pr_pipeline-tpl.yml @@ -83,7 +83,7 @@ resources: password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) {% set gp_ver = None %} -## ---------- Product Packages ---------- +## ---------- Greenplum Packages ---------- {% set gp_ver = 5 %} {% for i in range(num_gpdb5_versions) %} - name: gpdb[[gp_ver]]_rhel7_rpm_latest-[[i]] @@ -92,7 +92,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm {% endfor %} {# range(num_gpdb5_versions) #} {% set gp_ver = None %} @@ -104,7 +104,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel7-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel7-x86_64.rpm - name: gpdb[[gp_ver]]_rhel8_rpm_latest-[[i]] type: gcs @@ -112,7 +112,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-rhel8-x86_64.rpm + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-rhel8-x86_64.rpm - name: gpdb[[gp_ver]]_ubuntu18_deb_latest-[[i]] type: gcs @@ -120,7 +120,7 @@ resources: source: bucket: ((ud/pxf/common/pivnet-artifacts-bucket-name)) json_key: ((ud/pxf/secrets/pxf-storage-service-account-key)) - regexp: 
latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-(.*)-ubuntu18.04-amd64.deb + regexp: latest-[[i]]_gpdb[[gp_ver]]/greenplum-db-([[gp_ver]].*)-ubuntu18.04-amd64.deb {% endfor %} {# range(num_gpdb6_versions) #} {% set gp_ver = None %} @@ -131,7 +131,7 @@ resources: source: bucket: ((ud/pxf/common/gpdb-concourse-resources-prod-bucket-name)) json_key: ((concourse-gcs-resources-service-account-key)) - regexp: server/published/main/greenplum-db-(7.*)-el8-x86_64.rpm + regexp: server/released/gpdb7/greenplum-db-(7.*)-el8-x86_64.rpm ## ====================================================================== ## JOBS From b56aeaeea0c09bec07a84ed11c21eba9d2e30557 Mon Sep 17 00:00:00 2001 From: Ashuka Xue Date: Mon, 1 May 2023 10:26:42 -0700 Subject: [PATCH 03/35] Update automation README (#969) This commit adds up-to-date instructions for SSH configuration to ensure that automation can run on developer machines. --- automation/README.Linux.md | 14 +------------- automation/README.md | 12 +++++++----- 2 files changed, 8 insertions(+), 18 deletions(-) diff --git a/automation/README.Linux.md b/automation/README.Linux.md index 6f7acfa3f3..4b6f4de70c 100644 --- a/automation/README.Linux.md +++ b/automation/README.Linux.md @@ -1,6 +1,7 @@ # Running Automation on Linux **Note:** This information was documented based on the steps taken to get automation running on a Debian Bookworm (12) system. +They are intended to be used in tandem with the information in the main README file. 
## Locale Setup @@ -19,19 +20,6 @@ gpstop -a gpstart -a ``` -## SSH Setup - -```sh -sudo tee /etc/ssh/sshd_config.d/pxf-automation.conf >/dev/null /dev/null <_.log -TestNg report will generated into target/surefire-reports +TestNg report will be generated into target/surefire-reports # IDE Setup (IntelliJ) and Automation Debugging From b21768715c7dc73b8598951036d44bb286055ff0 Mon Sep 17 00:00:00 2001 From: Ashuka Xue Date: Thu, 4 May 2023 10:27:38 -0700 Subject: [PATCH 04/35] Add pxfdelimited_import formatter to support multibyte delimiters for TEXT and CSV profiles (#956) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds a new formatter, `pxfdelimiter_import` which can be used with `:text` and `:csv` profiles. This formatter allows users who have text or CSV files with multi-byte delimiters (such as ¤ or 停) to load and query tables pointing to these types of files via PXF. This formatter also supports multi-character delimiters (such as DELIM). 
This formatter can be used by creating an external table similar to the following: ``` CREATE READABLE EXTERNAL TABLE multibyte_example (id int, city text, state text) LOCATION ('pxf://multibyte_example?PROFILE=hdfs:text') FORMAT 'CUSTOM' (FORMATTER='pxfdelimited_import', DELIMITER='¤'); ``` The following format options are also available for use: - QUOTE (default value `"`) - ESCAPE (default value is the quote value) - NEWLINE (default value is `LF`, options are `LF`, `CR`, `CRLF`) The introduction of this PR also bumps the PXF extension version from 2.0 to 2.1, and use of this formatter requires users who already have an existing database with the PXF extension to run the following command: ``` ALTER EXTENSION pxf UPDATE; ``` --- .../pxf/automation/components/gpdb/Gpdb.java | 16 +- .../pxf/automation/utils/csv/CsvUtils.java | 64 +- .../features/extension/PxfExtensionTest.java | 135 ++++ .../features/gpupgrade/GpupgradeTest.java | 24 +- .../MultibyteDelimiterTest.java | 655 ++++++++++++++++ .../__init__.py | 0 .../create_extension}/__init__.py | 0 .../create_extension/expected/query01.ans | 72 ++ .../create_extension/runTest.py | 12 + .../create_extension/sql/query01.sql | 28 + .../create_extension_rpm}/__init__.py | 0 .../create_extension_rpm/expected/query01.ans | 60 ++ .../create_extension_rpm/runTest.py | 12 + .../create_extension_rpm/sql/query01.sql | 28 + .../extension_tests/downgrade/__init__.py | 0 .../step_1_create_extension/__init__.py | 0 .../expected/query01.ans | 73 ++ .../step_1_create_extension/runTest.py | 12 + .../step_1_create_extension/sql/query01.sql | 28 + .../__init__.py | 0 .../expected/query01.ans | 67 ++ .../runTest.py | 12 + .../sql/query01.sql | 34 + .../downgrade_then_upgrade/__init__.py | 0 .../step_1_check_extension/__init__.py | 0 .../expected/query01.ans | 73 ++ .../step_1_check_extension/runTest.py | 12 + .../step_1_check_extension/sql/query01.sql | 28 + .../__init__.py | 0 .../expected/query01.ans | 67 ++ .../runTest.py | 12 + 
.../sql/query01.sql | 34 + .../__init__.py | 0 .../expected/query01.ans | 73 ++ .../runTest.py | 12 + .../sql/query01.sql | 28 + .../explicit_upgrade/__init__.py | 0 .../__init__.py | 0 .../expected/query01.ans | 57 ++ .../runTest.py | 12 + .../sql/query01.sql | 26 + .../step_2_after_alter_extension/__init__.py | 0 .../expected/query01.ans | 73 ++ .../step_2_after_alter_extension/runTest.py | 12 + .../sql/query01.sql | 28 + .../extension_tests/upgrade/__init__.py | 0 .../__init__.py | 0 .../expected/query01.ans | 58 ++ .../runTest.py | 12 + .../sql/query01.sql | 26 + .../step_2_after_alter_extension/__init__.py | 0 .../expected/query01.ans | 73 ++ .../step_2_after_alter_extension/runTest.py | 12 + .../sql/query01.sql | 28 + .../gpupgrade/extension2_0/__init__.py | 0 .../__init__.py | 0 .../expected/query01.ans | 9 +- .../runTest.py | 0 .../sql/query01.sql | 2 + .../__init__.py | 0 .../expected/query01.ans | 9 +- .../runTest.py | 0 .../sql/query01.sql | 2 + .../__init__.py | 0 .../expected/query01.ans | 9 +- .../runTest.py | 0 .../sql/query01.sql | 2 + .../gpupgrade/extension2_1/__init__.py | 0 .../__init__.py | 0 .../expected/query01.ans | 52 ++ .../runTest.py | 12 + .../sql/query01.sql | 20 + .../__init__.py | 0 .../expected/query01.ans | 52 ++ .../runTest.py | 12 + .../sql/query01.sql | 20 + .../__init__.py | 0 .../expected/query01.ans | 50 ++ .../runTest.py | 12 + .../sql/query01.sql | 20 + .../features/multibyte_delimiter/__init__.py | 0 .../multibyte_delimiter/encoding/__init__.py | 0 .../encoding/expected/query01.ans | 9 + .../multibyte_delimiter/encoding/runTest.py | 12 + .../encoding/sql/query01.sql | 3 + .../encoding_bytes/__init__.py | 0 .../encoding_bytes/expected/query01.ans | 9 + .../encoding_bytes/runTest.py | 12 + .../encoding_bytes/sql/query01.sql | 3 + .../encoding_quote/__init__.py | 0 .../encoding_quote/expected/query01.ans | 9 + .../encoding_quote/runTest.py | 12 + .../encoding_quote/sql/query01.sql | 3 + .../encoding_quote_escape/__init__.py | 
0 .../expected/query01.ans | 9 + .../encoding_quote_escape/runTest.py | 12 + .../encoding_quote_escape/sql/query01.sql | 3 + .../multibyte_delimiter/four_byte/__init__.py | 0 .../four_byte/expected/query01.ans | 201 +++++ .../multibyte_delimiter/four_byte/runTest.py | 12 + .../four_byte/sql/query01.sql | 5 + .../multi_char/__init__.py | 0 .../multi_char/expected/query01.ans | 201 +++++ .../multibyte_delimiter/multi_char/runTest.py | 12 + .../multi_char/sql/query01.sql | 5 + .../no_profile/__init__.py | 0 .../no_profile/expected/query01.ans | 8 + .../multibyte_delimiter/no_profile/runTest.py | 12 + .../no_profile/sql/query01.sql | 3 + .../multibyte_delimiter/one_byte/__init__.py | 0 .../one_byte/expected/query01.ans | 106 +++ .../multibyte_delimiter/one_byte/runTest.py | 12 + .../one_byte/sql/query01.sql | 3 + .../multibyte_delimiter/one_col/__init__.py | 0 .../one_col/expected/query01.ans | 11 + .../multibyte_delimiter/one_col/runTest.py | 12 + .../one_col/sql/query01.sql | 3 + .../one_col_quote/__init__.py | 0 .../one_col_quote/expected/query01.ans | 11 + .../one_col_quote/runTest.py | 12 + .../one_col_quote/sql/query01.sql | 3 + .../quote_escape_newline/__init__.py | 0 .../quote_escape_newline/expected/query01.ans | 6 + .../quote_escape_newline/runTest.py | 12 + .../quote_escape_newline/sql/query01.sql | 3 + .../three_byte/__init__.py | 0 .../three_byte/expected/query01.ans | 201 +++++ .../multibyte_delimiter/three_byte/runTest.py | 12 + .../three_byte/sql/query01.sql | 5 + .../multibyte_delimiter/two_byte/__init__.py | 0 .../two_byte/expected/query01.ans | 201 +++++ .../multibyte_delimiter/two_byte/runTest.py | 12 + .../two_byte/sql/query01.sql | 5 + .../two_byte_no_delim/__init__.py | 0 .../two_byte_no_delim/expected/query01.ans | 8 + .../two_byte_no_delim/runTest.py | 12 + .../two_byte_no_delim/sql/query01.sql | 3 + .../two_byte_with_bzip2/__init__.py | 0 .../two_byte_with_bzip2/expected/query01.ans | 108 +++ .../two_byte_with_bzip2/runTest.py | 12 + 
.../two_byte_with_bzip2/sql/query01.sql | 3 + .../two_byte_with_cr/__init__.py | 0 .../two_byte_with_cr/expected/query01.ans | 106 +++ .../two_byte_with_cr/runTest.py | 12 + .../two_byte_with_cr/sql/query01.sql | 3 + .../two_byte_with_crlf/__init__.py | 0 .../two_byte_with_crlf/expected/query01.ans | 106 +++ .../two_byte_with_crlf/runTest.py | 12 + .../two_byte_with_crlf/sql/query01.sql | 3 + .../two_byte_with_quote/__init__.py | 0 .../two_byte_with_quote/expected/query01.ans | 106 +++ .../two_byte_with_quote/runTest.py | 12 + .../two_byte_with_quote/sql/query01.sql | 3 + .../__init__.py | 0 .../expected/query01.ans | 108 +++ .../two_byte_with_quote_and_escape/runTest.py | 12 + .../sql/query01.sql | 3 + .../two_byte_with_wrong_delim/__init__.py | 0 .../expected/query01.ans | 7 + .../two_byte_with_wrong_delim/runTest.py | 12 + .../two_byte_with_wrong_delim/sql/query01.sql | 3 + .../two_byte_with_wrong_eol/__init__.py | 0 .../expected/query01.ans | 6 + .../two_byte_with_wrong_eol/runTest.py | 12 + .../two_byte_with_wrong_eol/sql/query01.sql | 3 + .../two_byte_with_wrong_eol_5X/__init__.py | 0 .../expected/query01.ans | 18 + .../two_byte_with_wrong_eol_5X/runTest.py | 12 + .../sql/query01.sql | 11 + .../two_byte_with_wrong_escape/__init__.py | 0 .../expected/query01.ans | 6 + .../two_byte_with_wrong_escape/runTest.py | 12 + .../sql/query01.sql | 3 + .../two_byte_with_wrong_quote/__init__.py | 0 .../expected/query01.ans | 6 + .../two_byte_with_wrong_quote/runTest.py | 12 + .../two_byte_with_wrong_quote/sql/query01.sql | 3 + .../two_byte_with_wrong_quote_5X/__init__.py | 0 .../expected/query01.ans | 18 + .../two_byte_with_wrong_quote_5X/runTest.py | 12 + .../sql/query01.sql | 11 + .../two_byte_wrong_formatter/__init__.py | 0 .../expected/query01.ans | 8 + .../two_byte_wrong_formatter/runTest.py | 12 + .../two_byte_wrong_formatter/sql/query01.sql | 3 + .../wrong_profile/__init__.py | 0 .../wrong_profile/expected/query01.ans | 8 + .../wrong_profile/runTest.py | 12 + 
.../wrong_profile/sql/query01.sql | 3 + .../templates/build_pipeline-tpl.yml | 40 + concourse/scripts/cli/test_reset_init.sh | 2 +- concourse/scripts/pxf_common.bash | 73 ++ concourse/scripts/test.bash | 61 +- concourse/scripts/test_pxf.bash | 13 - concourse/scripts/test_upgrade_extension.bash | 96 +++ concourse/tasks/test.yml | 1 + concourse/tasks/test_certification.yml | 1 + concourse/tasks/upgrade_extension.yml | 34 + external-table/Makefile | 8 +- external-table/pxf--2.0--2.1.sql | 7 + external-table/pxf--2.1--2.0.sql | 9 + external-table/pxf--2.1.sql | 32 + external-table/pxf.control | 2 +- external-table/src/pxfdelimited_formatter.c | 719 ++++++++++++++++++ external-table/src/pxfdelimited_formatter.h | 45 ++ external-table/src/pxfheaders.c | 56 +- external-table/src/pxfheaders.h | 1 + .../src/scripts/pxf-post-gpupgrade | 15 +- .../pxf-service/src/scripts/pxf-pre-gpupgrade | 15 +- 209 files changed, 5521 insertions(+), 110 deletions(-) create mode 100644 automation/src/test/java/org/greenplum/pxf/automation/features/extension/PxfExtensionTest.java create mode 100644 automation/src/test/java/org/greenplum/pxf/automation/features/multibytedelimiter/MultibyteDelimiterTest.java rename automation/tincrepo/main/pxf/features/{gpupgrade/step_1_before_running_pxf_pre_gpupgrade => extension_tests}/__init__.py (100%) rename automation/tincrepo/main/pxf/features/{gpupgrade/step_2_after_running_pxf_pre_gpupgrade => extension_tests/create_extension}/__init__.py (100%) create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/create_extension/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/create_extension/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/create_extension/sql/query01.sql rename automation/tincrepo/main/pxf/features/{gpupgrade/step_3_after_running_pxf_post_gpupgrade => extension_tests/create_extension_rpm}/__init__.py (100%) create mode 100644 
automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/runTest.py create mode 100644 
automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/runTest.py create mode 100644 
automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/__init__.py create mode 100644 
automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/__init__.py rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans (87%) rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_1_before_running_pxf_pre_gpupgrade/runTest.py (100%) rename automation/tincrepo/main/pxf/features/gpupgrade/{step_3_after_running_pxf_post_gpupgrade => extension2_0/step_1_before_running_pxf_pre_gpupgrade}/sql/query01.sql (86%) create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/__init__.py rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans (87%) rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_2_after_running_pxf_pre_gpupgrade/runTest.py (100%) rename automation/tincrepo/main/pxf/features/gpupgrade/{step_1_before_running_pxf_pre_gpupgrade => extension2_0/step_2_after_running_pxf_pre_gpupgrade}/sql/query01.sql (86%) create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/__init__.py rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans (87%) rename automation/tincrepo/main/pxf/features/gpupgrade/{ => extension2_0}/step_3_after_running_pxf_post_gpupgrade/runTest.py (100%) rename automation/tincrepo/main/pxf/features/gpupgrade/{step_2_after_running_pxf_pre_gpupgrade => extension2_0/step_3_after_running_pxf_post_gpupgrade}/sql/query01.sql (86%) create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/__init__.py create mode 100644 
automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/__init__.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/runTest.py create mode 100644 automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/sql/query01.sql create mode 100755 
automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/expected/query01.ans create mode 100755 
automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/__init__.py create mode 100755 
automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/runTest.py create mode 100755 
automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/sql/query01.sql create mode 100755 
automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/__init__.py create mode 
100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/sql/query01.sql create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/sql/query01.sql create mode 100755 concourse/scripts/test_upgrade_extension.bash create mode 100644 concourse/tasks/upgrade_extension.yml create mode 100644 external-table/pxf--2.0--2.1.sql create mode 100644 external-table/pxf--2.1--2.0.sql create mode 100644 external-table/pxf--2.1.sql 
create mode 100644 external-table/src/pxfdelimited_formatter.c create mode 100644 external-table/src/pxfdelimited_formatter.h diff --git a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java index 0d29cde978..5fb1832c17 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java @@ -52,9 +52,7 @@ public void init() throws Exception { * connecting it. */ driver = "org.postgresql.Driver"; - address = "jdbc:postgresql://" + getHost() + ":" + getPort() + "/template1"; - - connect(); + connectToDataBase("template1"); version = determineVersion(); if (!checkDataBaseExists(getDb())) { @@ -71,11 +69,7 @@ public void init() throws Exception { } } - super.close(); - - address = "jdbc:postgresql://" + getHost() + ":" + getPort() + "/" + getDb(); - - connect(); + connectToDataBase(getDb()); // Create the extensions if they don't exist String extensionName = FDWUtils.useFDW ? 
"pxf_fdw" : "pxf"; @@ -123,6 +117,12 @@ public void copyData(Table source, Table target) throws Exception { + source.getName()); } + public void connectToDataBase(String dbName) throws Exception { + super.close(); + address = "jdbc:postgresql://" + getHost() + ":" + getPort() + "/" + dbName; + connect(); + } + @Override public void createDataBase(String schemaName, boolean ignoreFail) throws Exception { diff --git a/automation/src/main/java/org/greenplum/pxf/automation/utils/csv/CsvUtils.java b/automation/src/main/java/org/greenplum/pxf/automation/utils/csv/CsvUtils.java index a9b0df601b..39d0f65a54 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/utils/csv/CsvUtils.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/utils/csv/CsvUtils.java @@ -1,9 +1,14 @@ package org.greenplum.pxf.automation.utils.csv; -import java.io.File; +import java.io.FileOutputStream; import java.io.FileReader; -import java.io.FileWriter; import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -46,20 +51,44 @@ public static Table getTable(String pathToCsvFile) throws IOException { return dataTable; } + /** + * Update the delimiter in a CSV file. This helper function is required as CSVWriter only allows + * for single-character delimiters. 
As such, for test cases that have multi-character delimiters + * we have to update the delimiter in the file after the CSV has been created + * + * @param originalDelim Original single char delimiter + * @param newDelimiter Desired multi-char delimiter + * @throws IOException + */ + public static void updateDelim(String targetCsvFile, char originalDelim, String newDelimiter) + throws IOException { + + Path path = Paths.get(targetCsvFile); + + String content = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + content = content.replace(String.valueOf(originalDelim), newDelimiter); + Files.write(path, content.getBytes(StandardCharsets.UTF_8)); + } /** * Write {@link Table} to a CSV file. * * @param table {@link Table} contains required data list to write to CSV file * @param targetCsvFile to write the data Table + * @param charset the encoding charset to write in + * @param delimiter the separator value to use between columns + * @param quotechar the quote value to use for each col + * @param escapechar the escape value to use + * @param eol the eol value to indicate end of row * @throws IOException */ - public static void writeTableToCsvFile(Table table, String targetCsvFile) + public static void writeTableToCsvFile(Table table, String targetCsvFile, Charset charset, + char delimiter, char quotechar, + char escapechar, String eol) throws IOException { - // create CsvWriter using FileWriter - CSVWriter csvWriter = new CSVWriter(new FileWriter(targetCsvFile)); - + // create CsvWriter using OutputStreamWriter to allow for user given values + CSVWriter csvWriter = new CSVWriter(new OutputStreamWriter(new FileOutputStream(targetCsvFile), charset), delimiter, quotechar, escapechar, eol); try { // go over list and write each inner list to csv file for (List currentList : table.getData()) { @@ -75,4 +104,27 @@ public static void writeTableToCsvFile(Table table, String targetCsvFile) csvWriter.close(); } } + + /** + * Write {@link Table} to a CSV file with the 
default separator (delimiter), quote, escape and eol values + * + * @param table {@link Table} contains required data list to write to CSV file + * @param targetCsvFile to write the data Table + * @throws IOException + */ + public static void writeTableToCsvFile(Table table, String targetCsvFile) + throws IOException { + + // the default separator is , + // the default quote and escape values are both " + // the default eol value is \n + writeTableToCsvFile( + table, + targetCsvFile, + StandardCharsets.UTF_8, + CSVWriter.DEFAULT_SEPARATOR, + CSVWriter.DEFAULT_QUOTE_CHARACTER, + CSVWriter.DEFAULT_ESCAPE_CHARACTER, + CSVWriter.DEFAULT_LINE_END); + } } \ No newline at end of file diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/extension/PxfExtensionTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/extension/PxfExtensionTest.java new file mode 100644 index 0000000000..301dcdf369 --- /dev/null +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/extension/PxfExtensionTest.java @@ -0,0 +1,135 @@ +package org.greenplum.pxf.automation.features.extension; + +import org.greenplum.pxf.automation.BaseFunctionality; +import org.greenplum.pxf.automation.structures.tables.basic.Table; +import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable; +import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; +import org.testng.annotations.Test; + +public class PxfExtensionTest extends BaseFunctionality { + + public static final String[] FIELDS = { + "name text", + "num integer", + "dub double precision", + "longNum bigint", + "bool boolean" + }; + + private ReadableExternalTable externalTable; + private String location; + private String location_multi; + + @Override + public void beforeClass() throws Exception { + super.beforeClass(); + gpdb.dropDataBase("pxfautomation_extension", true, true); + gpdb.createDataBase("pxfautomation_extension", false); + 
gpdb.setDb("pxfautomation_extension"); + gpdb.connectToDataBase("pxfautomation_extension"); + + Table smallData = getSmallData("", 10); + + location = hdfs.getWorkingDirectory() + "/upgrade-test-data.txt"; + hdfs.writeTableToFile(location, smallData, ","); + + location_multi = hdfs.getWorkingDirectory() + "/upgrade-test-data_multibyte.txt"; + hdfs.writeTableToFile(location_multi, smallData, "停"); + } + + @Override + public void beforeMethod() throws Exception { + gpdb.setDb("pxfautomation_extension"); + gpdb.connectToDataBase("pxfautomation_extension"); + gpdb.runQuery("DROP EXTENSION IF EXISTS pxf CASCADE", true, false); + } + + @Test(groups = {"gpdb", "pxfExtensionVersion2_1"}) + public void testPxfCreateExtension() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf"); + // create a regular external table + createReadablePxfTable("default", location, false); + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + runTincTest("pxf.features.extension_tests.create_extension.runTest"); + } + + @Test(groups = {"pxfExtensionVersion2"}) + public void testPxfCreateExtensionOldRPM() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf"); + // create a regular external table + createReadablePxfTable("default", location, false); + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + runTincTest("pxf.features.extension_tests.create_extension_rpm.runTest"); + } + + @Test(groups = {"gpdb", "pxfExtensionVersion2_1"}) + public void testPxfUpgrade() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf VERSION '2.0'"); + createReadablePxfTable("default", location, false); + runTincTest("pxf.features.extension_tests.upgrade.step_1_create_extension_with_older_pxf_version.runTest"); + + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + gpdb.runQuery("ALTER EXTENSION pxf 
UPDATE"); + runTincTest("pxf.features.extension_tests.upgrade.step_2_after_alter_extension.runTest"); + } + + @Test(groups = {"gpdb", "pxfExtensionVersion2_1"}) + public void testPxfExplicitUpgrade() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf VERSION '2.0'"); + createReadablePxfTable("default", location, false); + runTincTest("pxf.features.extension_tests.explicit_upgrade.step_1_create_extension_with_older_pxf_version.runTest"); + + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.1'"); + runTincTest("pxf.features.extension_tests.explicit_upgrade.step_2_after_alter_extension.runTest"); + } + + @Test(groups = {"gpdb", "pxfExtensionVersion2_1"}) + public void testPxfDowngrade() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf"); + + createReadablePxfTable("default", location, false); + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + runTincTest("pxf.features.extension_tests.downgrade.step_1_create_extension.runTest"); + + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.0'"); + runTincTest("pxf.features.extension_tests.downgrade.step_2_after_alter_extension_downgrade.runTest"); + } + + @Test(groups = {"gpdb", "pxfExtensionVersion2_1"}) + public void testPxfDowngradeThenUpgradeAgain() throws Exception { + gpdb.runQuery("CREATE EXTENSION pxf"); + + createReadablePxfTable("default", location, false); + // create an external table with the multibyte formatter + createReadablePxfTable("default", location_multi, true); + runTincTest("pxf.features.extension_tests.downgrade_then_upgrade.step_1_check_extension.runTest"); + + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.0'"); + runTincTest("pxf.features.extension_tests.downgrade_then_upgrade.step_2_after_alter_extension_downgrade.runTest"); + + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.1'"); + 
runTincTest("pxf.features.extension_tests.downgrade_then_upgrade.step_3_after_alter_extension_upgrade.runTest"); + } + + private void createReadablePxfTable(String serverName, String location, boolean multi) throws Exception { + if (multi) { + externalTable = TableFactory.getPxfReadableTextTable("pxf_upgrade_test_multibyte", FIELDS, location, null); + externalTable.setFormat("CUSTOM"); + externalTable.setFormatter("pxfdelimited_import"); + externalTable.addFormatterOption("delimiter='停'"); + } else { + externalTable = TableFactory.getPxfReadableTextTable("pxf_upgrade_test", FIELDS, location, ","); + } + externalTable.setHost(pxfHost); + externalTable.setPort(pxfPort); + externalTable.setServer("SERVER=" + serverName); + gpdb.createTableAndVerify(externalTable); + } + +} diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java index 9ebe53912d..ec04195dd5 100644 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java @@ -34,6 +34,23 @@ protected void afterMethod() throws Exception { super.afterMethod(); } + @Test(groups = {"features", "gpdb"}) + public void testGpdbUpgradeExtensionVersion2_0Scenario() throws Exception { + + // Skipping this test for GP7 since this isn't passing for GP7 + if (gpdb.getVersion() >= 7) + throw new SkipException("Skipping testGpdbUpgradeScenario for GPDB7"); + + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.0'"); + runTincTest("pxf.features.gpupgrade.extension2_0.step_1_before_running_pxf_pre_gpupgrade.runTest"); + + cluster.runCommand("pxf-pre-gpupgrade"); + runTincTest("pxf.features.gpupgrade.extension2_0.step_2_after_running_pxf_pre_gpupgrade.runTest"); + + cluster.runCommand("pxf-post-gpupgrade"); + 
runTincTest("pxf.features.gpupgrade.extension2_0.step_3_after_running_pxf_post_gpupgrade.runTest"); + } + @Test(groups = {"features", "gpdb"}) public void testGpdbUpgradeScenario() throws Exception { @@ -41,13 +58,14 @@ public void testGpdbUpgradeScenario() throws Exception { if (gpdb.getVersion() >= 7) throw new SkipException("Skipping testGpdbUpgradeScenario for GPDB7"); - runTincTest("pxf.features.gpupgrade.step_1_before_running_pxf_pre_gpupgrade.runTest"); + gpdb.runQuery("ALTER EXTENSION pxf UPDATE TO '2.1'"); + runTincTest("pxf.features.gpupgrade.extension2_1.step_1_before_running_pxf_pre_gpupgrade.runTest"); cluster.runCommand("pxf-pre-gpupgrade"); - runTincTest("pxf.features.gpupgrade.step_2_after_running_pxf_pre_gpupgrade.runTest"); + runTincTest("pxf.features.gpupgrade.extension2_1.step_2_after_running_pxf_pre_gpupgrade.runTest"); cluster.runCommand("pxf-post-gpupgrade"); - runTincTest("pxf.features.gpupgrade.step_3_after_running_pxf_post_gpupgrade.runTest"); + runTincTest("pxf.features.gpupgrade.extension2_1.step_3_after_running_pxf_post_gpupgrade.runTest"); } private String prepareData() throws Exception { diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/multibytedelimiter/MultibyteDelimiterTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/multibytedelimiter/MultibyteDelimiterTest.java new file mode 100644 index 0000000000..6ac0ce539d --- /dev/null +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/multibytedelimiter/MultibyteDelimiterTest.java @@ -0,0 +1,655 @@ +package org.greenplum.pxf.automation.features.multibytedelimiter; + +import au.com.bytecode.opencsv.CSVWriter; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.io.compress.BZip2Codec; +import org.greenplum.pxf.automation.datapreparer.CustomTextPreparer; +import org.greenplum.pxf.automation.features.BaseFeature; +import org.greenplum.pxf.automation.structures.tables.basic.Table; +import 
org.greenplum.pxf.automation.structures.tables.utils.TableFactory; +import org.greenplum.pxf.automation.utils.csv.CsvUtils; +import org.greenplum.pxf.automation.utils.exception.ExceptionUtils; +import org.greenplum.pxf.automation.utils.fileformats.FileFormatsUtils; +import org.greenplum.pxf.automation.utils.system.ProtocolEnum; +import org.greenplum.pxf.automation.utils.system.ProtocolUtils; +import org.junit.Assert; +import org.postgresql.util.PSQLException; +import org.testng.annotations.Test; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import static java.lang.Thread.sleep; + +/** + * Collection of Test cases for PXF ability to read Text/CSV files with pxfdelimited_import. + */ +public class MultibyteDelimiterTest extends BaseFeature { + ProtocolEnum protocol; + + // holds data for file generation + Table dataTable = null; + + // holds data for encoded file generation + Table encodedDataTable = null; + + // path for storing data on HDFS + String hdfsFilePath = ""; + + private static final String[] ROW_WITH_ESCAPE = {"s_101", + "s_1001", + "s_10001", + "2299-11-28 05:46:40", + "101", + "1001", + "10001", + "10001", + "10001", + "10001", + "10001", + "s_101 | escaped!", + "s_1001", + "s_10001", + "2299-11-28 05:46:40", + "101", + "1001", + "10001", + "10001", + "10001", + "10001", + "10001"}; + + private class CsvSpec { + String delimiter; + char quote; + char escape; + String eol; + Charset encoding; + public CsvSpec(String delimiter, char quote, char escape, String eol) { + this.delimiter = delimiter; + this.quote = quote; + this.escape = escape; + this.eol = eol; + this.encoding = StandardCharsets.UTF_8; + } + + public CsvSpec(String delimiter, char quote, char escape) { + this(delimiter, quote, escape, CSVWriter.DEFAULT_LINE_END); + } + + public CsvSpec(String delimiter) { + this(delimiter, CSVWriter.NO_QUOTE_CHARACTER, CSVWriter.NO_ESCAPE_CHARACTER, CSVWriter.DEFAULT_LINE_END); + } + + public void setDelimiter(String 
delimiter) { + this.delimiter = delimiter; + } + + public void setQuote(char quote) { + this.quote = quote; + } + + public void setEscape(char escape) { + this.escape = escape; + } + + public void setEol(String eol) { + this.eol = eol; + } + + public void setEncoding(Charset encoding) { + this.encoding = encoding; + } + + /** + * This function takes the CsvSpec used for writing the file + * and clones it to be used as table formatter options. + * + * In the case of EOL handling, we do not want to include the + * EOL value as a formatter option if it is the default (\n). + * However, for '\r' the corresponding value is 'CR' and for '\r\n' + * the corresponding value is 'CRLF'. Anything else, we return as is. + * + * @return A clone of the CsvSpec to be used as formatter options for the table DDL + */ + public CsvSpec cloneForFormatting() { + // handle EOL situation + String eol = this.eol; + switch (eol) { + case "\r": + eol = "CR"; + break; + case "\r\n": + eol = "CRLF"; + break; + case CSVWriter.DEFAULT_LINE_END: + // for the default case, we do not want to set the eol value in the formatter options + eol = null; + break; + default: + eol = this.eol; + } + + CsvSpec clone = new CsvSpec(this.delimiter, this.quote, this.escape, eol); + // we do not care about the encoding value as a formatter option in the table DDL + clone.setEncoding(null); + + return clone; + } + } + + /** + * Prepare all components and all data flow (Hdfs to GPDB) + */ + @Override + public void beforeClass() throws Exception { + protocol = ProtocolUtils.getProtocol(); + } + + /** + * Before every method determine default hdfs data Path, default data, and + * default external table structure. Each case change it according to it + * needs. 
+ */ + @Override + protected void beforeMethod() throws Exception { + super.beforeMethod(); + // path for storing data on HDFS + hdfsFilePath = hdfs.getWorkingDirectory() + "/multibyteDelimiter"; + // prepare data in table + dataTable = new Table("dataTable", null); + FileFormatsUtils.prepareData(new CustomTextPreparer(), 100, dataTable); + // default definition of external table + exTable = TableFactory.getPxfReadableTextTable("pxf_multibyte_small_data", + new String[]{ + "s1 text", + "s2 text", + "s3 text", + "d1 timestamp", + "n1 int", + "n2 int", + "n3 int", + "n4 int", + "n5 int", + "n6 int", + "n7 int", + "s11 text", + "s12 text", + "s13 text", + "d11 timestamp", + "n11 int", + "n12 int", + "n13 int", + "n14 int", + "n15 int", + "n16 int", + "n17 int"}, + protocol.getExternalTablePath(hdfs.getBasePath(), hdfsFilePath), + null); + exTable.setFormat("CUSTOM"); + exTable.setFormatter("pxfdelimited_import"); + exTable.setProfile(protocol.value() + ":csv"); + + encodedDataTable = new Table("data", null); + encodedDataTable.addRow(new String[]{"4", "tá sé seo le tástáil dea-"}); + encodedDataTable.addRow(new String[]{"3", "règles d'automation"}); + encodedDataTable.addRow(new String[]{"5", "minden amire szüksége van a szeretet"}); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiter() throws Exception { + // used for creating the CSV file + CsvSpec fileSpec = new CsvSpec("¤"); + + runScenario("pxf_multibyte_twobyte_data", dataTable, fileSpec); + + // create a new table with the SKIP_HEADER_COUNT parameter + exTable.setName("pxf_multibyte_twobyte_data_with_skip"); + exTable.setUserParameters(new String[]{"SKIP_HEADER_COUNT=10"}); + // create external table + gpdb.createTableAndVerify(exTable); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readThreeByteDelimiter() throws Exception { + CsvSpec fileSpec = new CsvSpec("停"); + 
+ runScenario("pxf_multibyte_threebyte_data", dataTable, fileSpec); + + // create a new table with the SKIP_HEADER_COUNT parameter + exTable.setName("pxf_multibyte_threebyte_data_with_skip"); + exTable.setUserParameters(new String[]{"SKIP_HEADER_COUNT=10"}); + // create external table + gpdb.createTableAndVerify(exTable); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.three_byte.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readFourByteDelimiter() throws Exception { + CsvSpec fileSpec = new CsvSpec("\uD83D\uDE42"); + + runScenario("pxf_multibyte_fourbyte_data", dataTable, fileSpec); + + // create a new table with the SKIP_HEADER_COUNT parameter + exTable.setName("pxf_multibyte_fourbyte_data_with_skip"); + exTable.setUserParameters(new String[]{"SKIP_HEADER_COUNT=10"}); + // create external table + gpdb.createTableAndVerify(exTable); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.four_byte.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readMultiCharStringDelimiter() throws Exception { + CsvSpec fileSpec = new CsvSpec("DELIM"); + + runScenario("pxf_multibyte_multichar_data", dataTable, fileSpec); + + // create a new table with the SKIP_HEADER_COUNT parameter + exTable.setName("pxf_multibyte_multichar_data_with_skip"); + exTable.setUserParameters(new String[]{"SKIP_HEADER_COUNT=10"}); + // create external table + gpdb.createTableAndVerify(exTable); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.multi_char.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithCRLF() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", CSVWriter.NO_QUOTE_CHARACTER, CSVWriter.NO_ESCAPE_CHARACTER, "\r\n"); + + runScenario("pxf_multibyte_twobyte_withcrlf_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_crlf.runTest"); + } + + @Test(groups = 
{"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithCR() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", CSVWriter.NO_QUOTE_CHARACTER, CSVWriter.NO_ESCAPE_CHARACTER, "\r"); + + // we need to add the eol value to the URL to be able to parse the data on PXF Java side + exTable.setUserParameters(new String[] {"NEWLINE=CR"}); + + runScenario("pxf_multibyte_twobyte_withcr_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_cr.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWrongFormatter() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + + exTable.setFormatter("pxfwritable_import"); + + runScenario("pxf_multibyte_twobyte_wrongformatter_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_wrong_formatter.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterDelimNotProvided() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + // remove the delimiter for the formatterOptions + tableSpec.setDelimiter(null); + + runScenario("pxf_multibyte_twobyte_nodelim_data", tableSpec, dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_no_delim.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithWrongDelimiter() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + // set the wrong delimiter for the formatterOptions + tableSpec.setDelimiter("停"); + + runScenario("pxf_multibyte_twobyte_wrong_delim_data", tableSpec, dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_delim.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void 
readTwoByteDelimiterWithQuote() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", CSVWriter.DEFAULT_QUOTE_CHARACTER, CSVWriter.NO_ESCAPE_CHARACTER); + + runScenario("pxf_multibyte_twobyte_withquote_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_quote.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithWrongEol() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", CSVWriter.DEFAULT_QUOTE_CHARACTER, CSVWriter.DEFAULT_ESCAPE_CHARACTER); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + // set the wrong eol for the formatterOptions + tableSpec.setEol("CR"); + + runScenario("pxf_multibyte_twobyte_wrong_eol_data", tableSpec, dataTable, fileSpec); + + // verify results + // in newer versions of GP6 and in GP7, GPDB calls into the formatter one more time to handle EOF properly + // however, this is not the case for GP5 and for versions of GP6 older than 6.24.0 + // therefore, we must run 2 different sets of tests to check for the expected error + if (gpdb.getVersion() >= 6) { + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_eol.runTest"); + } else { + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_eol_5X.runTest"); + } + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithWrongQuote() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", CSVWriter.DEFAULT_QUOTE_CHARACTER, CSVWriter.DEFAULT_ESCAPE_CHARACTER); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + // set the wrong quote for the formatterOptions + tableSpec.setQuote('|'); + + runScenario("pxf_multibyte_twobyte_wrong_quote_data", tableSpec, dataTable, fileSpec); + + // verify results + // in newer versions of GP6 and in GP7, GPDB calls into the formatter one more time to handle EOF properly + // however, this is not the case for GP5 and for versions of GP6 older than 6.24.0 + // therefore, we must run 2 
different sets of tests to check for the expected error + if (gpdb.getVersion() >= 6) { + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_quote.runTest"); + } else { + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_quote_5X.runTest"); + } + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithQuoteAndEscape() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', '\\'); + + dataTable.addRow(ROW_WITH_ESCAPE); + + runScenario("pxf_multibyte_twobyte_withquote_withescape_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_quote_and_escape.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteDelimiterWithWrongEscape() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', '\\'); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + tableSpec.setEscape('#'); + + dataTable.addRow(ROW_WITH_ESCAPE); + + runScenario("pxf_multibyte_twobyte_wrong_escape_data", tableSpec, dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_wrong_escape.runTest"); + } + + // users should still be able to use a normal delimiter with this formatter + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readOneByteDelimiter() throws Exception { + CsvSpec fileSpec = new CsvSpec("|"); + + runScenario("pxf_multibyte_onebyte_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.one_byte.runTest"); + } + + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readOneCol() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + + exTable.setFields(new String[]{"s1 text"}); + + dataTable = new Table("data", null); + dataTable.addRow(new String[]{"tá sé seo le tástáil dea-"}); + dataTable.addRow(new String[]{"règles d'automation"}); + dataTable.addRow(new String[]{"minden amire szüksége van a szeretet"}); + + 
runScenario("pxf_multibyte_onecol_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.one_col.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readOneColQuote() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', CSVWriter.NO_ESCAPE_CHARACTER); + + exTable.setFields(new String[]{"s1 text"}); + + dataTable = new Table("data", null); + dataTable.addRow(new String[]{"tá sé seo le tástáil dea-"}); + dataTable.addRow(new String[]{"règles d'automation"}); + dataTable.addRow(new String[]{"minden amire szüksége van a szeretet"}); + + runScenario("pxf_multibyte_onecol_quote_data", dataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.one_col_quote.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readBzip2CompressedCsv() throws Exception { + BZip2Codec codec = new BZip2Codec(); + codec.setConf(hdfs.getConfiguration()); + char c = 'a'; + + for (int i = 0; i < 10; i++, c++) { + Table dataTable = getSmallData(StringUtils.repeat(String.valueOf(c), 2), 10); + hdfs.writeTableToFile(hdfs.getWorkingDirectory() + "/bzip2/" + c + "_" + fileName + ".bz2", + dataTable, "¤", StandardCharsets.UTF_8, codec); + } + + createCsvExternalTable("pxf_multibyte_twobyte_withbzip2_data", + new String[] { + "name text", + "num integer", + "dub double precision", + "longNum bigint", + "bool boolean" + }, + protocol.getExternalTablePath(hdfs.getBasePath(), hdfs.getWorkingDirectory())+ "/bzip2/", + new String[] {"delimiter='¤'"}); + + runTincTest("pxf.features.multibyte_delimiter.two_byte_with_bzip2.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readTwoByteWithQuoteEscapeNewLine() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', '\\', "EOL"); + + dataTable.addRow(ROW_WITH_ESCAPE); + + runScenario("pxf_multibyte_quote_escape_newline_data", dataTable, fileSpec); + + // verify results + 
runTincTest("pxf.features.multibyte_delimiter.quote_escape_newline.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void invalidCodePoint() throws Exception { + exTable.setName("pxf_multibyte_invalid_codepoint_data"); + exTable.setFormatterOptions(new String[] {"delimiter=E'\\xA4'"}); + + // create external table + try { + gpdb.createTableAndVerify(exTable); + Assert.fail("Insert data should fail because of unsupported type"); + } catch (PSQLException e) { + ExceptionUtils.validate(null, e, new PSQLException("ERROR.*invalid byte sequence for encoding.*?", null), true); + } + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readFileWithLatin1EncodingTextProfile() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + // set the encoding value since the default value in CsvSpec is UTF-8 + fileSpec.setEncoding(StandardCharsets.ISO_8859_1); + + exTable.setFields(new String[]{"num1 int", "word text"}); + exTable.setEncoding("LATIN1"); + + runScenario("pxf_multibyte_encoding", encodedDataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.encoding.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readFileWithLatin1EncodingByteRepresentationTextProfile() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤"); + // set the encoding value since the default value in CsvSpec is UTF-8 + fileSpec.setEncoding(StandardCharsets.ISO_8859_1); + CsvSpec tableSpec = fileSpec.cloneForFormatting(); + // use byte encoding instead + tableSpec.setDelimiter("\\xC2\\xA4"); + + exTable.setFields(new String[]{"num1 int", "word text"}); + exTable.setEncoding("LATIN1"); + exTable.setProfile(protocol.value() + ":text"); + + runScenario("pxf_multibyte_encoding_bytes", tableSpec, encodedDataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.encoding_bytes.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void 
readFileWithLatin1EncodingWithQuoteTextProfile() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', '|'); + // set the encoding value since the default value in CsvSpec is UTF-8 + fileSpec.setEncoding(StandardCharsets.ISO_8859_1); + + exTable.setFields(new String[]{"num1 int", "word text"}); + exTable.setEncoding("LATIN1"); + exTable.setProfile(protocol.value() + ":text"); + + runScenario("pxf_multibyte_encoding_quote", encodedDataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.encoding_quote.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void readFileWithLatin1EncodingWithQuoteAndEscapeTextProfile() throws Exception { + CsvSpec fileSpec = new CsvSpec("¤", '|', '\"'); + // set the encoding value since the default value in CsvSpec is UTF-8 + fileSpec.setEncoding(StandardCharsets.ISO_8859_1); + + exTable.setFields(new String[]{"num1 int", "word text"}); + exTable.setEncoding("LATIN1"); + exTable.setProfile(protocol.value() + ":text"); + + runScenario("pxf_multibyte_encoding_quote_escape", encodedDataTable, fileSpec); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.encoding_quote_escape.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void wrongProfileWithFormatter() throws Exception { + exTable.setName("pxf_multibyte_wrong_profile"); + exTable.setFormatterOptions(new String[] {"delimiter='¤'", "quote='|'", "escape='\"'"}); + exTable.setProfile(protocol.value() + ":avro"); + exTable.setFields(new String[]{"name text", "age int"}); + + // prepare data and write to HDFS + gpdb.createTableAndVerify(exTable); + // location of schema and data files + String absolutePath = getClass().getClassLoader().getResource("data").getPath(); + String resourcePath = absolutePath + "/avro/"; + hdfs.writeAvroFileFromJson(hdfsFilePath + "simple.avro", + "file://" + resourcePath + "simple.avsc", + "file://" + resourcePath + "simple.json", null); + + // verify results + 
runTincTest("pxf.features.multibyte_delimiter.wrong_profile.runTest"); + } + + @Test(groups = {"gpdb", "hcfs", "security"}) + public void noProfileWithFormatter() throws Exception { + exTable.setName("pxf_multibyte_no_profile"); + exTable.setFormatterOptions(new String[] {"delimiter='¤'", "quote='|'", "escape='\"'"}); + exTable.setProfile(null); + exTable.setFragmenter("default-fragmenter"); + exTable.setAccessor("default-accessor"); + exTable.setResolver("default-resolver"); + exTable.setFields(new String[]{"name text", "age int"}); + + gpdb.createTableAndVerify(exTable); + + // verify results + runTincTest("pxf.features.multibyte_delimiter.no_profile.runTest"); + } + + private void createCsvExternalTable(String name, String[] cols, String path, String[] formatterOptions) throws Exception { + exTable = TableFactory.getPxfReadableTextTable(name, cols, path, null); + exTable.setFormat("CUSTOM"); + exTable.setFormatter("pxfdelimited_import"); + exTable.setProfile(protocol.value() + ":csv"); + exTable.setFormatterOptions(formatterOptions); + + gpdb.createTableAndVerify(exTable); + } + + private void writeCsvFileToHdfs(Table dataTable, CsvSpec spec) throws Exception { + // create local CSV + String tempLocalDataPath = dataTempFolder + "/data.csv"; + if (spec.delimiter.length() > 1) { + CsvUtils.writeTableToCsvFile(dataTable, tempLocalDataPath, spec.encoding, + '|', spec.quote, spec.escape, spec.eol); + CsvUtils.updateDelim(tempLocalDataPath, '|', spec.delimiter); + } else { + CsvUtils.writeTableToCsvFile(dataTable, tempLocalDataPath, spec.encoding, + spec.delimiter.charAt(0), spec.quote, spec.escape, spec.eol); + } + + // copy local CSV to HDFS + hdfs.copyFromLocal(tempLocalDataPath, hdfsFilePath); + sleep(2500); + } + + private void runScenario(String tableName, CsvSpec tableSpec, Table dataTable, CsvSpec fileSpec) throws Exception { + exTable.setName(tableName); + // set the formatter options using the table spec + if (tableSpec.delimiter != null) { + 
exTable.addFormatterOption("delimiter=E'" + tableSpec.delimiter + "'"); + } + if (tableSpec.quote != CSVWriter.NO_QUOTE_CHARACTER) { + exTable.addFormatterOption("quote='" + tableSpec.quote + "'"); + } + if (tableSpec.escape != CSVWriter.NO_ESCAPE_CHARACTER) { + exTable.addFormatterOption("escape='" + tableSpec.escape + "'"); + } + if (tableSpec.eol != null) { + exTable.addFormatterOption("newline='" + tableSpec.eol + "'"); + } + + // create external table + gpdb.createTableAndVerify(exTable); + + // create CSV file in hdfs using the provided data table and file spec + writeCsvFileToHdfs(dataTable, fileSpec); + } + + private void runScenario(String tableName, Table dataTable, CsvSpec fileSpec) throws Exception { + runScenario(tableName, fileSpec.cloneForFormatting(), dataTable, fileSpec); + + } +} diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/__init__.py similarity index 100% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/__init__.py rename to automation/tincrepo/main/pxf/features/extension_tests/__init__.py diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/__init__.py similarity index 100% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/__init__.py rename to automation/tincrepo/main/pxf/features/extension_tests/create_extension/__init__.py diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/expected/query01.ans new file mode 100644 index 0000000000..c6553ee2e3 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/expected/query01.ans @@ -0,0 +1,72 
@@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF install test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 
100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/runTest.py new file mode 100644 index 0000000000..2a6aba5073 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfCreateExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/sql/query01.sql new file mode 100644 index 0000000000..c9962dffec --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF install test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' 
+ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/__init__.py similarity index 100% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/__init__.py rename to automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/__init__.py diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/expected/query01.ans new file mode 100644 index 0000000000..417c714a8b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/expected/query01.ans @@ -0,0 +1,60 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF install test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.0 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +--------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | 
pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(5 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; +ERROR: formatter function "pxfdelimited_import" of type readable was not found +HINT: Create it with CREATE FUNCTION. diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/runTest.py new file mode 100644 index 0000000000..e58fbc474f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfCreateExtensionRpm(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/sql/query01.sql new file mode 100644 index 0000000000..c9962dffec --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/create_extension_rpm/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF install test on small data +-- start_matchsubs +-- 
+-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/expected/query01.ans new file mode 100644 index 0000000000..e142b6eade --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/expected/query01.ans @@ -0,0 +1,73 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 
'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git 
a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/runTest.py new file mode 100644 index 0000000000..d3ea0c067f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfDowngradeStep1CreateExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/sql/query01.sql new file mode 100644 index 0000000000..76edbb8211 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_1_create_extension/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git 
a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/expected/query01.ans new file mode 100644 index 0000000000..c51d284396 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/expected/query01.ans @@ -0,0 +1,67 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- m{.*\"pxfdelimited_import\".*} +-- s{\"pxfdelimited_import\"}{pxfdelimited_import} +-- +-- m{.*found\.*} +-- s{found\.}{found} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.0 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +--------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + 
pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(5 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; +ERROR: formatter function "pxfdelimited_import" of type readable was not found +HINT: Create it with CREATE FUNCTION. diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/runTest.py new file mode 100644 index 0000000000..074c7f1e20 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfDowngradeStep2AlterExtensionPxf(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/sql/query01.sql new file mode 100644 index 0000000000..8a87c225e2 --- /dev/null +++ 
b/automation/tincrepo/main/pxf/features/extension_tests/downgrade/step_2_after_alter_extension_downgrade/sql/query01.sql @@ -0,0 +1,34 @@ +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- m{.*\"pxfdelimited_import\".*} +-- s{\"pxfdelimited_import\"}{pxfdelimited_import} +-- +-- m{.*found\.*} +-- s{found\.}{found} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/expected/query01.ans new file mode 100644 index 0000000000..e142b6eade --- /dev/null +++ 
b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/expected/query01.ans @@ -0,0 +1,73 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM 
pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/runTest.py new file mode 100644 index 0000000000..018a9aac88 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfDowngradeStep1CheckExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/sql/query01.sql new file mode 100644 index 0000000000..76edbb8211 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_1_check_extension/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + 
+SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/expected/query01.ans new file mode 100644 index 0000000000..c2b26b82bb --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/expected/query01.ans @@ -0,0 +1,67 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- m{.*\"pxfdelimited_import\".*} +-- s{\"pxfdelimited_import\"}{pxfdelimited_import} +-- +-- m{.*found\.*} +-- s{found\.}{found} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.0 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, 
p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +--------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(5 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; +ERROR: formatter function "pxfdelimited_import" of type readable was not found +HINT: Create it with CREATE FUNCTION. 
diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/runTest.py new file mode 100644 index 0000000000..074c7f1e20 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfDowngradeStep2AlterExtensionPxf(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/sql/query01.sql new file mode 100644 index 0000000000..8a87c225e2 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_2_after_alter_extension_downgrade/sql/query01.sql @@ -0,0 +1,34 @@ +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- m{.*\"pxfdelimited_import\".*} +-- s{\"pxfdelimited_import\"}{pxfdelimited_import} +-- +-- m{.*found\.*} +-- s{found\.}{found} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN 
pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/expected/query01.ans new file mode 100644 index 0000000000..e142b6eade --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/expected/query01.ans @@ -0,0 +1,73 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | 
$PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/runTest.py new file mode 100644 index 0000000000..c04be95838 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class 
PxfDowngradeStep3UpgradeExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/sql/query01.sql new file mode 100644 index 0000000000..76edbb8211 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/downgrade_then_upgrade/step_3_after_alter_extension_upgrade/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF downgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/__init__.py new file mode 
100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans new file mode 100644 index 0000000000..406e013d71 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans @@ -0,0 +1,57 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF upgrade with explicit version test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.0 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +--------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(5 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 
100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/runTest.py new file mode 100644 index 0000000000..b5326cdcbe --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfUpgradeStep1CreateExtensionExplicitVersion(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql new file mode 100644 index 0000000000..48ba18a339 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql @@ -0,0 +1,26 @@ +-- @description query01 for PXF upgrade with explicit version test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + 
+SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/expected/query01.ans new file mode 100644 index 0000000000..b70f65c76d --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/expected/query01.ans @@ -0,0 +1,73 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF upgrade with explicit version test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin 
+---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/runTest.py new file mode 100644 index 0000000000..a045020533 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models 
import SQLConcurrencyTestCase + +class PxfUpgradeStep2AfterAlterExtensionExplicitVersion(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/sql/query01.sql new file mode 100644 index 0000000000..c3075e9b9c --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/explicit_upgrade/step_2_after_alter_extension/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF upgrade with explicit version test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/__init__.py new file mode 
100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans new file mode 100644 index 0000000000..02dfffac39 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/expected/query01.ans @@ -0,0 +1,58 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- @description query01 for PXF upgrade test on small data +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.0 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +--------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(5 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 
| 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/runTest.py new file mode 100644 index 0000000000..c51b01210f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfUpgradeStep1CreateExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql new file mode 100644 index 0000000000..8c31dcf16f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_1_create_extension_with_older_pxf_version/sql/query01.sql @@ -0,0 +1,26 @@ +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW 
dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/__init__.py b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/expected/query01.ans b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/expected/query01.ans new file mode 100644 index 0000000000..9f87b291b6 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/expected/query01.ans @@ -0,0 +1,73 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF upgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + extversion +------------ + 2.1 +(1 row) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | 
$PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + +SELECT * FROM pxf_upgrade_test ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/runTest.py b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/runTest.py new file mode 100644 index 0000000000..b5c292ab01 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfUpgradeStep2AfterAlterExtension(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + 
@gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/sql/query01.sql b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/sql/query01.sql new file mode 100644 index 0000000000..5edb69e59b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/extension_tests/upgrade/step_2_after_alter_extension/sql/query01.sql @@ -0,0 +1,28 @@ +-- @description query01 for PXF upgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- m{.*\$libdir/pxf.*} +-- s{\$libdir}{\$PXF_HOME/gpextable} +-- +-- end_matchsubs +-- start_ignore +\c pxfautomation_extension +-- end_ignore + +SELECT extversion FROM pg_extension WHERE extname = 'pxf'; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + +SELECT * FROM pxf_upgrade_test ORDER BY num; + +SELECT * FROM pxf_upgrade_test_multibyte ORDER BY num; diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans 
b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans similarity index 87% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans index 9963429d73..b5720ece84 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans @@ -7,6 +7,12 @@ -- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.0 +(1 row) + SELECT * FROM pxf_gpupgrade_test; name | num | dub | longnum | bool --------+-----+-----+---------------+------ @@ -34,7 +40,7 @@ FROM pg_catalog.pg_extension AS e INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) WHERE d.deptype = 'e' AND e.extname = 'pxf' ORDER BY 1; - proname | prosrc | probin + proname | prosrc | probin --------------------+------------------------------+---------------------------------- pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf @@ -42,4 +48,3 @@ ORDER BY 1; pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf (5 rows) - diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/runTest.py similarity index 100% rename from 
automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/runTest.py rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/runTest.py diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql similarity index 86% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql index 3ec652f336..2b5ba5c029 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql @@ -6,6 +6,8 @@ -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + SELECT * FROM pxf_gpupgrade_test; SHOW dynamic_library_path; diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans similarity index 87% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans index 
dcbf6882d7..1dc81e3553 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans @@ -7,6 +7,12 @@ -- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.0 +(1 row) + SELECT * FROM pxf_gpupgrade_test; name | num | dub | longnum | bool --------+-----+-----+---------------+------ @@ -34,7 +40,7 @@ FROM pg_catalog.pg_extension AS e INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) WHERE d.deptype = 'e' AND e.extname = 'pxf' ORDER BY 1; - proname | prosrc | probin + proname | prosrc | probin --------------------+------------------------------+-------- pxf_read | pxfprotocol_import | pxf pxf_validate | pxfprotocol_validate_urls | pxf @@ -42,4 +48,3 @@ ORDER BY 1; pxfwritable_export | gpdbwritableformatter_export | pxf pxfwritable_import | gpdbwritableformatter_import | pxf (5 rows) - diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/runTest.py similarity index 100% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/runTest.py rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/runTest.py diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql similarity index 86% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql rename to 
automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql index 3ec652f336..2b5ba5c029 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql @@ -6,6 +6,8 @@ -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + SELECT * FROM pxf_gpupgrade_test; SHOW dynamic_library_path; diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans similarity index 87% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans index fe392964f8..7b7484635e 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans @@ -5,6 +5,12 @@ -- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.0 +(1 row) + SELECT * FROM pxf_gpupgrade_test; name | num | dub | longnum | bool --------+-----+-----+---------------+------ @@ 
-32,7 +38,7 @@ FROM pg_catalog.pg_extension AS e INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) WHERE d.deptype = 'e' AND e.extname = 'pxf' ORDER BY 1; - proname | prosrc | probin + proname | prosrc | probin --------------------+------------------------------+---------------------------------- pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf @@ -40,4 +46,3 @@ ORDER BY 1; pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf (5 rows) - diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/runTest.py similarity index 100% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_3_after_running_pxf_post_gpupgrade/runTest.py rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/runTest.py diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql similarity index 86% rename from automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql rename to automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql index 3ec652f336..2b5ba5c029 100644 --- a/automation/tincrepo/main/pxf/features/gpupgrade/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_0/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql @@ -6,6 +6,8 @@ -- -- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + SELECT * FROM 
pxf_gpupgrade_test; SHOW dynamic_library_path; diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans new file mode 100644 index 0000000000..36314ef52c --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans @@ -0,0 +1,52 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.1 +(1 row) + +SELECT * FROM pxf_gpupgrade_test; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, 
p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/runTest.py new file mode 100644 index 0000000000..e30838ab3d --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfGpupgradeStep1BeforeRunningPxfPreGpupgrade(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql new file mode 100644 index 0000000000..2b5ba5c029 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/sql/query01.sql @@ -0,0 +1,20 @@ +-- @description 
query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs + +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + +SELECT * FROM pxf_gpupgrade_test; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans new file mode 100644 index 0000000000..57fb6851b1 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/expected/query01.ans @@ -0,0 +1,52 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.1 +(1 row) + +SELECT * FROM pxf_gpupgrade_test; + name | num | dub | longnum | bool +--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 
5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $PXF_HOME/gpextable:$libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+-------- + pxf_read | pxfprotocol_import | pxf + pxf_validate | pxfprotocol_validate_urls | pxf + pxf_write | pxfprotocol_export | pxf + pxfdelimited_import | pxfdelimited_import | pxf + pxfwritable_export | gpdbwritableformatter_export | pxf + pxfwritable_import | gpdbwritableformatter_import | pxf +(6 rows) + diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/runTest.py new file mode 100644 index 0000000000..c95831f419 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfGpupgradeStep2AfterRunningPxfPreGpupgrade(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql new file mode 100644 
index 0000000000..2b5ba5c029 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_2_after_running_pxf_pre_gpupgrade/sql/query01.sql @@ -0,0 +1,20 @@ +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs + +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + +SELECT * FROM pxf_gpupgrade_test; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/__init__.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans new file mode 100644 index 0000000000..88833d6376 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/expected/query01.ans @@ -0,0 +1,50 @@ +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + extname | extversion +---------+------------ + pxf | 2.1 +(1 row) + +SELECT * FROM pxf_gpupgrade_test; + name | num | dub | longnum | bool 
+--------+-----+-----+---------------+------ + row_1 | 1 | 1 | 100000000000 | f + row_2 | 2 | 2 | 200000000000 | t + row_3 | 3 | 3 | 300000000000 | f + row_4 | 4 | 4 | 400000000000 | t + row_5 | 5 | 5 | 500000000000 | f + row_6 | 6 | 6 | 600000000000 | t + row_7 | 7 | 7 | 700000000000 | f + row_8 | 8 | 8 | 800000000000 | t + row_9 | 9 | 9 | 900000000000 | f + row_10 | 10 | 10 | 1000000000000 | t +(10 rows) + +SHOW dynamic_library_path; + dynamic_library_path +---------------------- + $libdir +(1 row) + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; + proname | prosrc | probin +---------------------+------------------------------+---------------------------------- + pxf_read | pxfprotocol_import | $PXF_HOME/gpextable/pxf + pxf_validate | pxfprotocol_validate_urls | $PXF_HOME/gpextable/pxf + pxf_write | pxfprotocol_export | $PXF_HOME/gpextable/pxf + pxfdelimited_import | pxfdelimited_import | $PXF_HOME/gpextable/pxf + pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf + pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf +(6 rows) + diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/runTest.py b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/runTest.py new file mode 100644 index 0000000000..6742df6f97 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfGpupgradeStep3AfterRunningPxfPostGpupgrade(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir 
= 'expected' + out_dir = 'output' \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql new file mode 100644 index 0000000000..2b5ba5c029 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/gpupgrade/extension2_1/step_3_after_running_pxf_post_gpupgrade/sql/query01.sql @@ -0,0 +1,20 @@ +-- @description query01 for PXF gpupgrade test on small data +-- start_matchsubs +-- +-- m{.*/usr/local/pxf-(dev|gp\d).*} +-- s{/usr/local/pxf-(dev|gp\d)}{\$PXF_HOME} +-- +-- end_matchsubs + +SELECT extname, extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'; + +SELECT * FROM pxf_gpupgrade_test; + +SHOW dynamic_library_path; + +SELECT p.proname, p.prosrc, p.probin +FROM pg_catalog.pg_extension AS e + INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid) + INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid) +WHERE d.deptype = 'e' AND e.extname = 'pxf' +ORDER BY 1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/expected/query01.ans new file mode 100755 index 0000000000..54c87af484 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/expected/query01.ans @@ -0,0 +1,9 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding 
case + +SELECT * from pxf_multibyte_encoding ORDER BY num1; + num1 | word +------+-------------------------------------- + 3 | règles d'automation + 4 | tá sé seo le tástáil dea- + 5 | minden amire szüksége van a szeretet +(3 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/runTest.py new file mode 100755 index 0000000000..067825963b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteDelimDifferentEncoding(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/sql/query01.sql new file mode 100755 index 0000000000..ccfec08c22 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding case + +SELECT * from pxf_multibyte_encoding ORDER BY num1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/expected/query01.ans new file mode 100755 index 0000000000..fd256950b4 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/expected/query01.ans @@ -0,0 
+1,9 @@ +-- @description query01 for PXF Multibyte delimiter encoding case with byte representation + +SELECT * from pxf_multibyte_encoding_bytes ORDER BY num1; + num1 | word +------+-------------------------------------- + 3 | règles d'automation + 4 | tá sé seo le tástáil dea- + 5 | minden amire szüksége van a szeretet +(3 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/runTest.py new file mode 100755 index 0000000000..b37b69894f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteDelimDifferentEncodingBytes(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/sql/query01.sql new file mode 100755 index 0000000000..e72c483fb6 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_bytes/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter encoding case with byte representation + +SELECT * from pxf_multibyte_encoding_bytes ORDER BY num1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/expected/query01.ans new file 
mode 100755 index 0000000000..d89be58292 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/expected/query01.ans @@ -0,0 +1,9 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding case with quotes + +SELECT * from pxf_multibyte_encoding_quote ORDER BY num1; + num1 | word +------+-------------------------------------- + 3 | règles d'automation + 4 | tá sé seo le tástáil dea- + 5 | minden amire szüksége van a szeretet +(3 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/runTest.py new file mode 100755 index 0000000000..c0aa68cf43 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteDelimDifferentEncodingWithQuote(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/sql/query01.sql new file mode 100755 index 0000000000..fb691ec1b8 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding case with quotes + +SELECT * from pxf_multibyte_encoding_quote ORDER BY num1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git 
a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/expected/query01.ans new file mode 100755 index 0000000000..c434a74dd7 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/expected/query01.ans @@ -0,0 +1,9 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding case with quotes and escape + +SELECT * from pxf_multibyte_encoding_quote_escape ORDER BY num1; + num1 | word +------+-------------------------------------- + 3 | règles d'automation + 4 | tá sé seo le tástáil dea- + 5 | minden amire szüksége van a szeretet +(3 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/runTest.py new file mode 100755 index 0000000000..5e0b7172b1 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteDelimDifferentEncodingWithQuoteEscape(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/sql/query01.sql new file mode 100755 index 0000000000..43251ef4ca --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/encoding_quote_escape/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter differentEncoding case with quotes and escape + +SELECT * from pxf_multibyte_encoding_quote_escape ORDER BY num1; \ No 
newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/expected/query01.ans new file mode 100755 index 0000000000..fc6afde521 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/expected/query01.ans @@ -0,0 +1,201 @@ +-- @description query01 for PXF Multibyte delimiter, 4-byte delim cases + +SELECT * from pxf_multibyte_fourbyte_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 
1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 
1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 
09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | 
s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 
5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 
61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 
| 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 
+ s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 
9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) + +SELECT * from pxf_multibyte_fourbyte_data_with_skip ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 
1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 
23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 
| 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 
+ s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 
5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 
09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | 
s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 
8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 
99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(90 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/runTest.py new file mode 100755 index 0000000000..500cd9322d --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteFourByteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/sql/query01.sql new file mode 100755 index 0000000000..e4296c2c57 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/four_byte/sql/query01.sql @@ -0,0 +1,5 @@ +-- @description query01 for PXF Multibyte delimiter, 4-byte delim cases + +SELECT * from pxf_multibyte_fourbyte_data ORDER BY n1; + +SELECT * from pxf_multibyte_fourbyte_data_with_skip ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/expected/query01.ans 
b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/expected/query01.ans new file mode 100755 index 0000000000..ad629380d2 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/expected/query01.ans @@ -0,0 +1,201 @@ +-- @description query01 for PXF Multibyte delimiter, multi-character delim cases + +SELECT * from pxf_multibyte_multichar_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 
1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 
2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | 
s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 
4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 
04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | 
s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 
7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 
84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 
| 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) + +SELECT * from pxf_multibyte_multichar_data_with_skip ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 
2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + 
s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 
3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 
05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | 
s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 
6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 
79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 
| 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 
10000 | 10000 | 10000 +(90 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/runTest.py new file mode 100755 index 0000000000..31d291f3ce --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteMultiCharByteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/sql/query01.sql new file mode 100755 index 0000000000..c81a76c0e0 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/multi_char/sql/query01.sql @@ -0,0 +1,5 @@ +-- @description query01 for PXF Multibyte delimiter, multi-character delim cases + +SELECT * from pxf_multibyte_multichar_data ORDER BY n1; + +SELECT * from pxf_multibyte_multichar_data_with_skip ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/expected/query01.ans new file mode 100755 index 0000000000..e64f72f7ca --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/expected/query01.ans @@ -0,0 +1,8 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, no profile case + +SELECT * 
from pxf_multibyte_no_profile ORDER BY age; +ERROR: The "pxfdelimited_import" formatter only works with *:text or *:csv profiles. +HINT: Please double check the profile option in the external table definition. +CONTEXT: External table pxf_multibyte_no_profile \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/runTest.py new file mode 100755 index 0000000000..b6129f0ed6 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteNoProfile(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/sql/query01.sql new file mode 100755 index 0000000000..77da05b72f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/no_profile/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, no profile case + +SELECT * from pxf_multibyte_no_profile ORDER BY age; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/expected/query01.ans new file mode 100755 index 0000000000..b9eafe7ff9 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/expected/query01.ans @@ -0,0 +1,106 @@ 
+-- @description query01 for PXF Multibyte delimiter, one byte delimiter option + +SELECT * from pxf_multibyte_onebyte_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 
1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 
2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + 
s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 
4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 
02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | 
s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 
7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 
86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 
| 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/runTest.py new file mode 100755 index 0000000000..1c157338f6 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteOneByteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/sql/query01.sql new file mode 100755 index 0000000000..de267a49f1 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_byte/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, one byte delimiter option + +SELECT * from pxf_multibyte_onebyte_data ORDER BY n1; diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/__init__.py 
b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/expected/query01.ans new file mode 100755 index 0000000000..246b6501cb --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/expected/query01.ans @@ -0,0 +1,11 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, one col +SELECT * from pxf_multibyte_onecol_data ORDER BY s1; + s1 +--------------------------------------------- + minden amire szüksége van a szeretet + règles d'automation + tá sé seo le tástáil dea- +(3 rows) + diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/runTest.py new file mode 100755 index 0000000000..98f0b77aa7 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteOneCol(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/sql/query01.sql new file mode 100755 index 0000000000..288a8a3dad --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, one col + +SELECT * from pxf_multibyte_onecol_data ORDER BY s1; diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/__init__.py 
b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/expected/query01.ans new file mode 100755 index 0000000000..ebafea46eb --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/expected/query01.ans @@ -0,0 +1,11 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, one col with quote +SELECT * from pxf_multibyte_onecol_quote_data ORDER BY s1; + s1 +------------------------------------------- + minden amire szüksége van a szeretet + règles d'automation + tá sé seo le tástáil dea- +(3 rows) + diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/runTest.py new file mode 100755 index 0000000000..95876a90ce --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteOneColQuote(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/sql/query01.sql new file mode 100755 index 0000000000..810f158242 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/one_col_quote/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, one col with quote + +SELECT * from pxf_multibyte_onecol_quote_data ORDER BY s1; diff --git 
a/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/expected/query01.ans new file mode 100755 index 0000000000..3422e4d7c9 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/expected/query01.ans @@ -0,0 +1,6 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong escape case +SELECT * from pxf_multibyte_quote_escape_newline_data ORDER BY n1; +ERROR: NEWLINE can only be LF, CRLF, or CR (pxfdelimited_formatter.c:xxx) +CONTEXT: External table pxf_multibyte_quote_escape_newline_data diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/runTest.py new file mode 100755 index 0000000000..6400dd3ecd --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteQuoteEscapeNewlineData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/sql/query01.sql new file mode 100755 index 0000000000..0f813f88bb --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/quote_escape_newline/sql/query01.sql @@ -0,0 
+1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong escape case + +SELECT * from pxf_multibyte_quote_escape_newline_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/expected/query01.ans new file mode 100755 index 0000000000..70c2bd71e4 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/expected/query01.ans @@ -0,0 +1,201 @@ +-- @description query01 for PXF Multibyte delimiter, 3-byte delim cases + +SELECT * from pxf_multibyte_threebyte_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | 
s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 
1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 
27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 
| 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 
| s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 
6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 
10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | 
s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 
9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) + +SELECT * from pxf_multibyte_threebyte_data_with_skip ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 
1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 
220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 
2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + 
s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 
5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 
11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | 
s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 
8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 
98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(90 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/runTest.py new file mode 100755 index 0000000000..17a06ae190 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteThreeByteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/sql/query01.sql new file mode 100755 index 0000000000..58cd12fea7 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/three_byte/sql/query01.sql @@ -0,0 +1,5 @@ +-- @description query01 for PXF Multibyte delimiter, 3-byte delim cases + +SELECT * from pxf_multibyte_threebyte_data ORDER BY n1; + +SELECT * from pxf_multibyte_threebyte_data_with_skip ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/__init__.py new file mode 100755 index 0000000000..e69de29bb2 
diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/expected/query01.ans new file mode 100755 index 0000000000..eb79289767 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/expected/query01.ans @@ -0,0 +1,201 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases + +SELECT * from pxf_multibyte_twobyte_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 
1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 
05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | 
s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 
4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 
51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 
| 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 
| s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 
8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 
04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) + +SELECT * from pxf_multibyte_twobyte_data_with_skip ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 
13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 
| 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 
| s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 
4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 
05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | 
s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 
7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 
89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | 
s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(90 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/runTest.py new file mode 100755 index 0000000000..9d317200ad --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/sql/query01.sql new file mode 100755 index 0000000000..c62387b7c0 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte/sql/query01.sql @@ -0,0 +1,5 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases + +SELECT * from pxf_multibyte_twobyte_data ORDER BY n1; + +SELECT * from pxf_multibyte_twobyte_data_with_skip ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/expected/query01.ans new file mode 100755 index 0000000000..8ffcdc6345 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/expected/query01.ans @@ -0,0 +1,8 @@ +-- start_ignore +-- m/DETAIL/ +-- 
s/DETAIL/CONTEXT/ +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with no delim provided +SELECT * from pxf_multibyte_twobyte_nodelim_data ORDER BY n1; +ERROR: missing delimiter option (pxfdelimited_formatter.c:xxx) +CONTEXT: External table pxf_multibyte_twobyte_nodelim_data diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/runTest.py new file mode 100755 index 0000000000..42b48c068f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteNoDelimiter(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/sql/query01.sql new file mode 100755 index 0000000000..551d66cd17 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_no_delim/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with no formatter provided + +SELECT * from pxf_multibyte_twobyte_nodelim_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/expected/query01.ans new file mode 
100755 index 0000000000..926843adcf --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/expected/query01.ans @@ -0,0 +1,108 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with compressed bzip2 file +SELECT * from pxf_multibyte_twobyte_withbzip2_data ORDER BY name; + name | num | dub | longnum | bool +-----------+-----+-----+---------------+------ + aa_row_1 | 1 | 1 | 100000000000 | f + aa_row_10 | 10 | 10 | 1000000000000 | t + aa_row_2 | 2 | 2 | 200000000000 | t + aa_row_3 | 3 | 3 | 300000000000 | f + aa_row_4 | 4 | 4 | 400000000000 | t + aa_row_5 | 5 | 5 | 500000000000 | f + aa_row_6 | 6 | 6 | 600000000000 | t + aa_row_7 | 7 | 7 | 700000000000 | f + aa_row_8 | 8 | 8 | 800000000000 | t + aa_row_9 | 9 | 9 | 900000000000 | f + bb_row_1 | 1 | 1 | 100000000000 | f + bb_row_10 | 10 | 10 | 1000000000000 | t + bb_row_2 | 2 | 2 | 200000000000 | t + bb_row_3 | 3 | 3 | 300000000000 | f + bb_row_4 | 4 | 4 | 400000000000 | t + bb_row_5 | 5 | 5 | 500000000000 | f + bb_row_6 | 6 | 6 | 600000000000 | t + bb_row_7 | 7 | 7 | 700000000000 | f + bb_row_8 | 8 | 8 | 800000000000 | t + bb_row_9 | 9 | 9 | 900000000000 | f + cc_row_1 | 1 | 1 | 100000000000 | f + cc_row_10 | 10 | 10 | 1000000000000 | t + cc_row_2 | 2 | 2 | 200000000000 | t + cc_row_3 | 3 | 3 | 300000000000 | f + cc_row_4 | 4 | 4 | 400000000000 | t + cc_row_5 | 5 | 5 | 500000000000 | f + cc_row_6 | 6 | 6 | 600000000000 | t + cc_row_7 | 7 | 7 | 700000000000 | f + cc_row_8 | 8 | 8 | 800000000000 | t + cc_row_9 | 9 | 9 | 900000000000 | f + dd_row_1 | 1 | 1 | 100000000000 | f + dd_row_10 | 10 | 10 | 1000000000000 | t + dd_row_2 | 2 | 2 | 200000000000 | t + dd_row_3 | 3 | 3 | 300000000000 | f + dd_row_4 | 4 | 4 | 400000000000 | t + dd_row_5 | 5 | 5 | 500000000000 | f + dd_row_6 | 6 | 6 | 600000000000 | t + dd_row_7 | 7 | 7 | 700000000000 | f + dd_row_8 | 8 | 8 | 800000000000 | t + dd_row_9 | 9 | 9 | 900000000000 | f + 
ee_row_1 | 1 | 1 | 100000000000 | f + ee_row_10 | 10 | 10 | 1000000000000 | t + ee_row_2 | 2 | 2 | 200000000000 | t + ee_row_3 | 3 | 3 | 300000000000 | f + ee_row_4 | 4 | 4 | 400000000000 | t + ee_row_5 | 5 | 5 | 500000000000 | f + ee_row_6 | 6 | 6 | 600000000000 | t + ee_row_7 | 7 | 7 | 700000000000 | f + ee_row_8 | 8 | 8 | 800000000000 | t + ee_row_9 | 9 | 9 | 900000000000 | f + ff_row_1 | 1 | 1 | 100000000000 | f + ff_row_10 | 10 | 10 | 1000000000000 | t + ff_row_2 | 2 | 2 | 200000000000 | t + ff_row_3 | 3 | 3 | 300000000000 | f + ff_row_4 | 4 | 4 | 400000000000 | t + ff_row_5 | 5 | 5 | 500000000000 | f + ff_row_6 | 6 | 6 | 600000000000 | t + ff_row_7 | 7 | 7 | 700000000000 | f + ff_row_8 | 8 | 8 | 800000000000 | t + ff_row_9 | 9 | 9 | 900000000000 | f + gg_row_1 | 1 | 1 | 100000000000 | f + gg_row_10 | 10 | 10 | 1000000000000 | t + gg_row_2 | 2 | 2 | 200000000000 | t + gg_row_3 | 3 | 3 | 300000000000 | f + gg_row_4 | 4 | 4 | 400000000000 | t + gg_row_5 | 5 | 5 | 500000000000 | f + gg_row_6 | 6 | 6 | 600000000000 | t + gg_row_7 | 7 | 7 | 700000000000 | f + gg_row_8 | 8 | 8 | 800000000000 | t + gg_row_9 | 9 | 9 | 900000000000 | f + hh_row_1 | 1 | 1 | 100000000000 | f + hh_row_10 | 10 | 10 | 1000000000000 | t + hh_row_2 | 2 | 2 | 200000000000 | t + hh_row_3 | 3 | 3 | 300000000000 | f + hh_row_4 | 4 | 4 | 400000000000 | t + hh_row_5 | 5 | 5 | 500000000000 | f + hh_row_6 | 6 | 6 | 600000000000 | t + hh_row_7 | 7 | 7 | 700000000000 | f + hh_row_8 | 8 | 8 | 800000000000 | t + hh_row_9 | 9 | 9 | 900000000000 | f + ii_row_1 | 1 | 1 | 100000000000 | f + ii_row_10 | 10 | 10 | 1000000000000 | t + ii_row_2 | 2 | 2 | 200000000000 | t + ii_row_3 | 3 | 3 | 300000000000 | f + ii_row_4 | 4 | 4 | 400000000000 | t + ii_row_5 | 5 | 5 | 500000000000 | f + ii_row_6 | 6 | 6 | 600000000000 | t + ii_row_7 | 7 | 7 | 700000000000 | f + ii_row_8 | 8 | 8 | 800000000000 | t + ii_row_9 | 9 | 9 | 900000000000 | f + jj_row_1 | 1 | 1 | 100000000000 | f + jj_row_10 | 10 | 10 | 1000000000000 | t + 
jj_row_2 | 2 | 2 | 200000000000 | t + jj_row_3 | 3 | 3 | 300000000000 | f + jj_row_4 | 4 | 4 | 400000000000 | t + jj_row_5 | 5 | 5 | 500000000000 | f + jj_row_6 | 6 | 6 | 600000000000 | t + jj_row_7 | 7 | 7 | 700000000000 | f + jj_row_8 | 8 | 8 | 800000000000 | t + jj_row_9 | 9 | 9 | 900000000000 | f +(100 rows) + diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/runTest.py new file mode 100755 index 0000000000..2bb72d2f64 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithBzip2(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/sql/query01.sql new file mode 100755 index 0000000000..f5d31c7f7b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_bzip2/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with compressed bzip2 file + +SELECT * from pxf_multibyte_twobyte_withbzip2_data ORDER BY name; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/expected/query01.ans new file 
mode 100755 index 0000000000..b14366a004 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/expected/query01.ans @@ -0,0 +1,106 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with CR case + +SELECT * from pxf_multibyte_twobyte_withcr_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 
| 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 
03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | 
s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 
4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 
52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 
| 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 
| s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 
8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 
02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/runTest.py new file mode 100755 index 0000000000..fbcc948d4a --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithCRDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/sql/query01.sql new file mode 100755 index 0000000000..45f1a82fe8 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_cr/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte 
delim with CR case + +SELECT * from pxf_multibyte_twobyte_withcr_data ORDER BY n1; diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/expected/query01.ans new file mode 100755 index 0000000000..641e450f83 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/expected/query01.ans @@ -0,0 +1,106 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with CRLF case + +SELECT * from pxf_multibyte_twobyte_withcrlf_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 
| 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 
10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | 
s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 
3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 
49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 
| 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 
| s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 
8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 
09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/runTest.py new file mode 100755 index 0000000000..b9e7de4f73 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithCRLFDelimiterData(SQLConcurrencyTestCase): + """ + 
@db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/sql/query01.sql new file mode 100755 index 0000000000..806fdf6715 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_crlf/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with CRLF case + +SELECT * from pxf_multibyte_twobyte_withcrlf_data ORDER BY n1; diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/expected/query01.ans new file mode 100755 index 0000000000..18d1709d50 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/expected/query01.ans @@ -0,0 +1,106 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with quote cases + +SELECT * from pxf_multibyte_twobyte_withquote_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 
200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | 
s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 
2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 
35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 
| 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 
| s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 
6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 
04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 + s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | 
s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 
10000 | 10000 | 10000 | 10000 | 10000 +(100 rows) diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/runTest.py new file mode 100755 index 0000000000..40fff51f2c --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithQuoteDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/sql/query01.sql new file mode 100755 index 0000000000..0f90a49220 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with quote case + +SELECT * from pxf_multibyte_twobyte_withquote_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/expected/query01.ans new file mode 100755 index 0000000000..2a589ee89f --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/expected/query01.ans @@ -0,0 +1,108 @@ +-- start_ignore +-- 
end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with quote and escape case +SELECT * from pxf_multibyte_twobyte_withquote_withescape_data ORDER BY n1; + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +-------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+-------+------------------+--------+---------+---------------------+-----+------+-------+-------+-------+-------+------- + s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 | s_1 | s_10 | s_100 | 1973-03-03 09:46:40 | 1 | 10 | 100 | 100 | 100 | 100 | 100 + s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 | s_2 | s_20 | s_200 | 1976-05-03 07:33:20 | 2 | 20 | 200 | 200 | 200 | 200 | 200 + s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 | s_3 | s_30 | s_300 | 1979-07-05 05:20:00 | 3 | 30 | 300 | 300 | 300 | 300 | 300 + s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 | s_4 | s_40 | s_400 | 1982-09-04 03:06:40 | 4 | 40 | 400 | 400 | 400 | 400 | 400 + s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 | s_5 | s_50 | s_500 | 1985-11-05 12:53:20 | 5 | 50 | 500 | 500 | 500 | 500 | 500 + s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 | s_6 | s_60 | s_600 | 1989-01-05 10:40:00 | 6 | 60 | 600 | 600 | 600 | 600 | 600 + s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 | s_7 | s_70 | s_700 | 1992-03-07 08:26:40 | 7 | 70 | 700 | 700 | 700 | 700 | 700 + s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 | s_8 | s_80 | s_800 | 1995-05-09 06:13:20 | 8 | 80 | 800 | 800 | 800 | 800 | 800 + s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 | s_9 | s_90 | s_900 | 1998-07-09 04:00:00 | 9 | 90 | 900 | 900 | 900 | 900 | 900 + 
s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 | s_10 | s_100 | s_1000 | 2001-09-09 01:46:40 | 10 | 100 | 1000 | 1000 | 1000 | 1000 | 1000 + s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 | s_11 | s_110 | s_1100 | 2004-11-09 11:33:20 | 11 | 110 | 1100 | 1100 | 1100 | 1100 | 1100 + s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 | s_12 | s_120 | s_1200 | 2008-01-10 09:20:00 | 12 | 120 | 1200 | 1200 | 1200 | 1200 | 1200 + s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 | s_13 | s_130 | s_1300 | 2011-03-13 07:06:40 | 13 | 130 | 1300 | 1300 | 1300 | 1300 | 1300 + s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 | s_14 | s_140 | s_1400 | 2014-05-13 04:53:20 | 14 | 140 | 1400 | 1400 | 1400 | 1400 | 1400 + s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 | s_15 | s_150 | s_1500 | 2017-07-14 02:40:00 | 15 | 150 | 1500 | 1500 | 1500 | 1500 | 1500 + s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 | s_16 | s_160 | s_1600 | 2020-09-13 12:26:40 | 16 | 160 | 1600 | 1600 | 1600 | 1600 | 1600 + s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 | s_17 | s_170 | s_1700 | 2023-11-14 10:13:20 | 17 | 170 | 1700 | 1700 | 1700 | 1700 | 1700 + s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 | s_18 | s_180 | s_1800 | 2027-01-15 08:00:00 | 18 | 180 | 1800 | 1800 | 1800 | 1800 | 1800 + s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 | s_19 | s_190 | s_1900 | 2030-03-17 05:46:40 | 19 | 190 | 1900 | 1900 | 1900 | 1900 | 1900 + s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 2000 | 2000 | 2000 | 2000 | s_20 | s_200 | s_2000 | 2033-05-18 03:33:20 | 20 | 200 | 2000 | 
2000 | 2000 | 2000 | 2000 + s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 | s_21 | s_210 | s_2100 | 2036-07-18 01:20:00 | 21 | 210 | 2100 | 2100 | 2100 | 2100 | 2100 + s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 | s_22 | s_220 | s_2200 | 2039-09-18 11:06:40 | 22 | 220 | 2200 | 2200 | 2200 | 2200 | 2200 + s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 | s_23 | s_230 | s_2300 | 2042-11-19 08:53:20 | 23 | 230 | 2300 | 2300 | 2300 | 2300 | 2300 + s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 | s_24 | s_240 | s_2400 | 2046-01-19 06:40:00 | 24 | 240 | 2400 | 2400 | 2400 | 2400 | 2400 + s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 | s_25 | s_250 | s_2500 | 2049-03-22 04:26:40 | 25 | 250 | 2500 | 2500 | 2500 | 2500 | 2500 + s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 | s_26 | s_260 | s_2600 | 2052-05-22 02:13:20 | 26 | 260 | 2600 | 2600 | 2600 | 2600 | 2600 + s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 | s_27 | s_270 | s_2700 | 2055-07-24 12:00:00 | 27 | 270 | 2700 | 2700 | 2700 | 2700 | 2700 + s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 | s_28 | s_280 | s_2800 | 2058-09-23 09:46:40 | 28 | 280 | 2800 | 2800 | 2800 | 2800 | 2800 + s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 | s_29 | s_290 | s_2900 | 2061-11-23 07:33:20 | 29 | 290 | 2900 | 2900 | 2900 | 2900 | 2900 + s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 | s_30 | s_300 | s_3000 | 2065-01-24 05:20:00 | 30 | 300 | 3000 | 3000 | 3000 | 3000 | 3000 + s_31 | s_310 | s_3100 | 2068-03-26 03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 | s_31 | s_310 | s_3100 | 2068-03-26 
03:06:40 | 31 | 310 | 3100 | 3100 | 3100 | 3100 | 3100 + s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 | s_32 | s_320 | s_3200 | 2071-05-28 12:53:20 | 32 | 320 | 3200 | 3200 | 3200 | 3200 | 3200 + s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 | s_33 | s_330 | s_3300 | 2074-07-28 10:40:00 | 33 | 330 | 3300 | 3300 | 3300 | 3300 | 3300 + s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 | s_34 | s_340 | s_3400 | 2077-09-27 08:26:40 | 34 | 340 | 3400 | 3400 | 3400 | 3400 | 3400 + s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 | s_35 | s_350 | s_3500 | 2080-11-28 06:13:20 | 35 | 350 | 3500 | 3500 | 3500 | 3500 | 3500 + s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 | s_36 | s_360 | s_3600 | 2084-01-29 04:00:00 | 36 | 360 | 3600 | 3600 | 3600 | 3600 | 3600 + s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 | s_37 | s_370 | s_3700 | 2087-04-01 01:46:40 | 37 | 370 | 3700 | 3700 | 3700 | 3700 | 3700 + s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 | s_38 | s_380 | s_3800 | 2090-06-01 11:33:20 | 38 | 380 | 3800 | 3800 | 3800 | 3800 | 3800 + s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 | s_39 | s_390 | s_3900 | 2093-08-01 09:20:00 | 39 | 390 | 3900 | 3900 | 3900 | 3900 | 3900 + s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 | s_40 | s_400 | s_4000 | 2096-10-02 07:06:40 | 40 | 400 | 4000 | 4000 | 4000 | 4000 | 4000 + s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 | s_41 | s_410 | s_4100 | 2099-12-03 04:53:20 | 41 | 410 | 4100 | 4100 | 4100 | 4100 | 4100 + s_42 | s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 | s_42 | 
s_420 | s_4200 | 2103-02-04 02:40:00 | 42 | 420 | 4200 | 4200 | 4200 | 4200 | 4200 + s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 | s_43 | s_430 | s_4300 | 2106-04-06 12:26:40 | 43 | 430 | 4300 | 4300 | 4300 | 4300 | 4300 + s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 | s_44 | s_440 | s_4400 | 2109-06-06 10:13:20 | 44 | 440 | 4400 | 4400 | 4400 | 4400 | 4400 + s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 | s_45 | s_450 | s_4500 | 2112-08-07 08:00:00 | 45 | 450 | 4500 | 4500 | 4500 | 4500 | 4500 + s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 | s_46 | s_460 | s_4600 | 2115-10-08 05:46:40 | 46 | 460 | 4600 | 4600 | 4600 | 4600 | 4600 + s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 | s_47 | s_470 | s_4700 | 2118-12-09 03:33:20 | 47 | 470 | 4700 | 4700 | 4700 | 4700 | 4700 + s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 | s_48 | s_480 | s_4800 | 2122-02-08 01:20:00 | 48 | 480 | 4800 | 4800 | 4800 | 4800 | 4800 + s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 | s_49 | s_490 | s_4900 | 2125-04-10 11:06:40 | 49 | 490 | 4900 | 4900 | 4900 | 4900 | 4900 + s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 | s_50 | s_500 | s_5000 | 2128-06-11 08:53:20 | 50 | 500 | 5000 | 5000 | 5000 | 5000 | 5000 + s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 | s_51 | s_510 | s_5100 | 2131-08-12 06:40:00 | 51 | 510 | 5100 | 5100 | 5100 | 5100 | 5100 + s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 | s_52 | s_520 | s_5200 | 2134-10-13 04:26:40 | 52 | 520 | 5200 | 5200 | 5200 | 5200 | 5200 + s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 
5300 | 5300 | 5300 | s_53 | s_530 | s_5300 | 2137-12-13 02:13:20 | 53 | 530 | 5300 | 5300 | 5300 | 5300 | 5300 + s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 | s_54 | s_540 | s_5400 | 2141-02-13 12:00:00 | 54 | 540 | 5400 | 5400 | 5400 | 5400 | 5400 + s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 | s_55 | s_550 | s_5500 | 2144-04-15 09:46:40 | 55 | 550 | 5500 | 5500 | 5500 | 5500 | 5500 + s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 | s_56 | s_560 | s_5600 | 2147-06-16 07:33:20 | 56 | 560 | 5600 | 5600 | 5600 | 5600 | 5600 + s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 | s_57 | s_570 | s_5700 | 2150-08-17 05:20:00 | 57 | 570 | 5700 | 5700 | 5700 | 5700 | 5700 + s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 | s_58 | s_580 | s_5800 | 2153-10-17 03:06:40 | 58 | 580 | 5800 | 5800 | 5800 | 5800 | 5800 + s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 | s_59 | s_590 | s_5900 | 2156-12-18 12:53:20 | 59 | 590 | 5900 | 5900 | 5900 | 5900 | 5900 + s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 | s_60 | s_600 | s_6000 | 2160-02-18 10:40:00 | 60 | 600 | 6000 | 6000 | 6000 | 6000 | 6000 + s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 | s_61 | s_610 | s_6100 | 2163-04-20 08:26:40 | 61 | 610 | 6100 | 6100 | 6100 | 6100 | 6100 + s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 | s_62 | s_620 | s_6200 | 2166-06-21 06:13:20 | 62 | 620 | 6200 | 6200 | 6200 | 6200 | 6200 + s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 | s_63 | s_630 | s_6300 | 2169-08-21 04:00:00 | 63 | 630 | 6300 | 6300 | 6300 | 6300 | 6300 + s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 
64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 | s_64 | s_640 | s_6400 | 2172-10-22 01:46:40 | 64 | 640 | 6400 | 6400 | 6400 | 6400 | 6400 + s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 | s_65 | s_650 | s_6500 | 2175-12-23 11:33:20 | 65 | 650 | 6500 | 6500 | 6500 | 6500 | 6500 + s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 | s_66 | s_660 | s_6600 | 2179-02-22 09:20:00 | 66 | 660 | 6600 | 6600 | 6600 | 6600 | 6600 + s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 | s_67 | s_670 | s_6700 | 2182-04-25 07:06:40 | 67 | 670 | 6700 | 6700 | 6700 | 6700 | 6700 + s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 | s_68 | s_680 | s_6800 | 2185-06-25 04:53:20 | 68 | 680 | 6800 | 6800 | 6800 | 6800 | 6800 + s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 | s_69 | s_690 | s_6900 | 2188-08-26 02:40:00 | 69 | 690 | 6900 | 6900 | 6900 | 6900 | 6900 + s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 | s_70 | s_700 | s_7000 | 2191-10-27 12:26:40 | 70 | 700 | 7000 | 7000 | 7000 | 7000 | 7000 + s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 | s_71 | s_710 | s_7100 | 2194-12-27 10:13:20 | 71 | 710 | 7100 | 7100 | 7100 | 7100 | 7100 + s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 | s_72 | s_720 | s_7200 | 2198-02-27 08:00:00 | 72 | 720 | 7200 | 7200 | 7200 | 7200 | 7200 + s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 | s_73 | s_730 | s_7300 | 2201-04-30 05:46:40 | 73 | 730 | 7300 | 7300 | 7300 | 7300 | 7300 + s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 | s_74 | s_740 | s_7400 | 2204-07-01 03:33:20 | 74 | 740 | 7400 | 7400 | 7400 | 7400 | 7400 + s_75 | s_750 | s_7500 
| 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 | s_75 | s_750 | s_7500 | 2207-09-01 01:20:00 | 75 | 750 | 7500 | 7500 | 7500 | 7500 | 7500 + s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 | s_76 | s_760 | s_7600 | 2210-11-01 11:06:40 | 76 | 760 | 7600 | 7600 | 7600 | 7600 | 7600 + s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 | s_77 | s_770 | s_7700 | 2214-01-02 08:53:20 | 77 | 770 | 7700 | 7700 | 7700 | 7700 | 7700 + s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 | s_78 | s_780 | s_7800 | 2217-03-04 06:40:00 | 78 | 780 | 7800 | 7800 | 7800 | 7800 | 7800 + s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 | s_79 | s_790 | s_7900 | 2220-05-05 04:26:40 | 79 | 790 | 7900 | 7900 | 7900 | 7900 | 7900 + s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 | s_80 | s_800 | s_8000 | 2223-07-06 02:13:20 | 80 | 800 | 8000 | 8000 | 8000 | 8000 | 8000 + s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 | s_81 | s_810 | s_8100 | 2226-09-06 12:00:00 | 81 | 810 | 8100 | 8100 | 8100 | 8100 | 8100 + s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 | s_82 | s_820 | s_8200 | 2229-11-06 09:46:40 | 82 | 820 | 8200 | 8200 | 8200 | 8200 | 8200 + s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 | s_83 | s_830 | s_8300 | 2233-01-06 07:33:20 | 83 | 830 | 8300 | 8300 | 8300 | 8300 | 8300 + s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 | s_84 | s_840 | s_8400 | 2236-03-09 05:20:00 | 84 | 840 | 8400 | 8400 | 8400 | 8400 | 8400 + s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 | s_85 | s_850 | s_8500 | 2239-05-10 03:06:40 | 85 | 850 | 8500 | 8500 | 8500 | 8500 | 8500 
+ s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 | s_86 | s_860 | s_8600 | 2242-07-11 12:53:20 | 86 | 860 | 8600 | 8600 | 8600 | 8600 | 8600 + s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 | s_87 | s_870 | s_8700 | 2245-09-10 10:40:00 | 87 | 870 | 8700 | 8700 | 8700 | 8700 | 8700 + s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 | s_88 | s_880 | s_8800 | 2248-11-10 08:26:40 | 88 | 880 | 8800 | 8800 | 8800 | 8800 | 8800 + s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 | s_89 | s_890 | s_8900 | 2252-01-12 06:13:20 | 89 | 890 | 8900 | 8900 | 8900 | 8900 | 8900 + s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 | s_90 | s_900 | s_9000 | 2255-03-14 04:00:00 | 90 | 900 | 9000 | 9000 | 9000 | 9000 | 9000 + s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 | s_91 | s_910 | s_9100 | 2258-05-15 01:46:40 | 91 | 910 | 9100 | 9100 | 9100 | 9100 | 9100 + s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 | s_92 | s_920 | s_9200 | 2261-07-15 11:33:20 | 92 | 920 | 9200 | 9200 | 9200 | 9200 | 9200 + s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 | s_93 | s_930 | s_9300 | 2264-09-14 09:20:00 | 93 | 930 | 9300 | 9300 | 9300 | 9300 | 9300 + s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 | s_94 | s_940 | s_9400 | 2267-11-16 07:06:40 | 94 | 940 | 9400 | 9400 | 9400 | 9400 | 9400 + s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 | s_95 | s_950 | s_9500 | 2271-01-16 04:53:20 | 95 | 950 | 9500 | 9500 | 9500 | 9500 | 9500 + s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 9600 | 9600 | 9600 | 9600 | s_96 | s_960 | s_9600 | 2274-03-19 02:40:00 | 96 | 960 | 9600 | 
9600 | 9600 | 9600 | 9600 + s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 | s_97 | s_970 | s_9700 | 2277-05-19 12:26:40 | 97 | 970 | 9700 | 9700 | 9700 | 9700 | 9700 + s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 | s_98 | s_980 | s_9800 | 2280-07-19 10:13:20 | 98 | 980 | 9800 | 9800 | 9800 | 9800 | 9800 + s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 | s_99 | s_990 | s_9900 | 2283-09-20 08:00:00 | 99 | 990 | 9900 | 9900 | 9900 | 9900 | 9900 + s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 | s_100 | s_1000 | s_10000 | 2286-11-20 05:46:40 | 100 | 1000 | 10000 | 10000 | 10000 | 10000 | 10000 + s_101 | s_1001 | s_10001 | 2299-11-28 05:46:40 | 101 | 1001 | 10001 | 10001 | 10001 | 10001 | 10001 | s_101 | escaped! | s_1001 | s_10001 | 2299-11-28 05:46:40 | 101 | 1001 | 10001 | 10001 | 10001 | 10001 | 10001 +(101 rows) diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/runTest.py new file mode 100755 index 0000000000..64b52831d7 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithQuoteAndEscapeDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/sql/query01.sql new file mode 100755 index 0000000000..a406d3d083 --- /dev/null 
+++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_quote_and_escape/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with quote and escape case + +SELECT * from pxf_multibyte_twobyte_withquote_withescape_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/expected/query01.ans new file mode 100755 index 0000000000..6125f595e3 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/expected/query01.ans @@ -0,0 +1,7 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong delim case +SELECT * from pxf_multibyte_twobyte_wrong_delim_data ORDER BY n1; +ERROR: Expected 22 columns in row but found 1 +HINT: Is the `delimiter` value in the format options set correctly? 
+CONTEXT: External table pxf_multibyte_twobyte_wrong_delim_data diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/runTest.py new file mode 100755 index 0000000000..529dfbfd00 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/sql/query01.sql new file mode 100755 index 0000000000..aec229cbde --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_delim/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong delim case + +SELECT * from pxf_multibyte_twobyte_wrong_delim_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/expected/query01.ans new file mode 100755 index 0000000000..641fef219b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/expected/query01.ans @@ -0,0 +1,6 @@ +-- 
@description query01 for PXF Multibyte delimiter, 2-byte delim with wrong eol cases + +SELECT * from pxf_multibyte_twobyte_wrong_eol_data ORDER BY n1; +ERROR: Did not find expected `newline` character when `quote` value was provided +HINT: Check the format options in the table definition. Additionally, make sure there are no extraneous characters between the `quote` and `newline` values in the data. +CONTEXT: External table pxf_multibyte_twobyte_wrong_eol_data \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/runTest.py new file mode 100755 index 0000000000..8c380576b2 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongEol(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/sql/query01.sql new file mode 100755 index 0000000000..34c0506510 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong eol case + +SELECT * from pxf_multibyte_twobyte_wrong_eol_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff 
--git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/expected/query01.ans new file mode 100755 index 0000000000..94ffe7f2ec --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/expected/query01.ans @@ -0,0 +1,18 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong eol case + +-- start_matchsubs +-- +-- # create a match/subs + +-- m/WARNING/ +-- s/WARNING/GP_IGNORE: WARNING/ +-- +-- end_matchsubs +SELECT * from pxf_multibyte_twobyte_wrong_eol_data ORDER BY n1; +WARNING: unexpected end of file +CONTEXT: External table pxf_multibyte_twobyte_wrong_eol_data + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +----+----+----+----+----+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+----- +(0 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/runTest.py new file mode 100755 index 0000000000..40f63aaa90 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongEol_5X(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/sql/query01.sql new file 
mode 100755 index 0000000000..9206a6bcfc --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_eol_5X/sql/query01.sql @@ -0,0 +1,11 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong eol case + +-- start_matchsubs +-- +-- # create a match/subs + +-- m/WARNING/ +-- s/WARNING/GP_IGNORE: WARNING/ +-- +-- end_matchsubs +SELECT * from pxf_multibyte_twobyte_wrong_eol_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/expected/query01.ans new file mode 100755 index 0000000000..16f972d74b --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/expected/query01.ans @@ -0,0 +1,6 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong escape cases + +SELECT * from pxf_multibyte_twobyte_wrong_escape_data ORDER BY n1; +ERROR: Found an unescaped quote character +HINT: Is the `escape` value in the format options set correctly? 
+CONTEXT: External table pxf_multibyte_twobyte_wrong_escape_data \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/runTest.py new file mode 100755 index 0000000000..f119283690 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongEscapeDelimiterData(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/sql/query01.sql new file mode 100755 index 0000000000..cc3a0fa1f8 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_escape/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong escape case + +SELECT * from pxf_multibyte_twobyte_wrong_escape_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/expected/query01.ans new file mode 100755 index 0000000000..81301f5bba --- /dev/null +++ 
b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/expected/query01.ans @@ -0,0 +1,6 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong quote cases + +SELECT * from pxf_multibyte_twobyte_wrong_quote_data ORDER BY n1; +ERROR: Did not find expected `newline` character when `quote` value was provided +HINT: Check the format options in the table definition. Additionally, make sure there are no extraneous characters between the `quote` and `newline` values in the data. +CONTEXT: External table pxf_multibyte_twobyte_wrong_quote_data \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/runTest.py new file mode 100755 index 0000000000..9144471151 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongQuote(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/sql/query01.sql new file mode 100755 index 0000000000..b7e85fda95 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong quote case + +SELECT * from pxf_multibyte_twobyte_wrong_quote_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/__init__.py 
b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/expected/query01.ans new file mode 100755 index 0000000000..542264cf99 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/expected/query01.ans @@ -0,0 +1,18 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong quote case + +-- start_matchsubs +-- +-- # create a match/subs + +-- m/WARNING/ +-- s/WARNING/GP_IGNORE: WARNING/ +-- +-- end_matchsubs +SELECT * from pxf_multibyte_twobyte_wrong_quote_data ORDER BY n1; +WARNING: unexpected end of file +CONTEXT: External table pxf_multibyte_twobyte_wrong_quote_data + s1 | s2 | s3 | d1 | n1 | n2 | n3 | n4 | n5 | n6 | n7 | s11 | s12 | s13 | d11 | n11 | n12 | n13 | n14 | n15 | n16 | n17 +----+----+----+----+----+----+----+----+----+----+----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+----- +(0 rows) \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/runTest.py new file mode 100755 index 0000000000..77c4ddd878 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWithWrongQuote_5X(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git 
a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/sql/query01.sql new file mode 100755 index 0000000000..32d7c2a1aa --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_with_wrong_quote_5X/sql/query01.sql @@ -0,0 +1,11 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim with wrong quote case + +-- start_matchsubs +-- +-- # create a match/subs + +-- m/WARNING/ +-- s/WARNING/GP_IGNORE: WARNING/ +-- +-- end_matchsubs +SELECT * from pxf_multibyte_twobyte_wrong_quote_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/expected/query01.ans new file mode 100755 index 0000000000..39514fa0d7 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/expected/query01.ans @@ -0,0 +1,8 @@ +-- start_ignore +-- m/DETAIL/ +-- s/DETAIL/CONTEXT/ +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with wrong formatter provided +SELECT * from pxf_multibyte_twobyte_wrongformatter_data ORDER BY n1; +ERROR: Record has 1 fields but the schema size is 22 (seg1 slice1 127.0.0.1:6003 pid=51272) +CONTEXT: External table pxf_multibyte_twobyte_wrongformatter_data diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/runTest.py new file mode 
100755 index 0000000000..f4808fcd6a --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteTwoByteWrongFormatter(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/sql/query01.sql new file mode 100755 index 0000000000..427c002b80 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/two_byte_wrong_formatter/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, 2-byte delim cases with wrong formatter provided + +SELECT * from pxf_multibyte_twobyte_wrongformatter_data ORDER BY n1; \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/__init__.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/expected/query01.ans b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/expected/query01.ans new file mode 100755 index 0000000000..fce6d68acc --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/expected/query01.ans @@ -0,0 +1,8 @@ +-- start_ignore +-- end_ignore +-- @description query01 for PXF Multibyte delimiter, wrong profile case + +SELECT * from pxf_multibyte_wrong_profile ORDER BY age; +ERROR: The "pxfdelimited_import" formatter only works with *:text or *:csv profiles. +HINT: Please double check the profile option in the external table definition. 
+CONTEXT: External table pxf_multibyte_wrong_profile \ No newline at end of file diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/runTest.py b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/runTest.py new file mode 100755 index 0000000000..bcb30405f1 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfMultibyteWrongProfile(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/sql/query01.sql b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/sql/query01.sql new file mode 100755 index 0000000000..db42ad5fac --- /dev/null +++ b/automation/tincrepo/main/pxf/features/multibyte_delimiter/wrong_profile/sql/query01.sql @@ -0,0 +1,3 @@ +-- @description query01 for PXF Multibyte delimiter, wrong profile case + +SELECT * from pxf_multibyte_wrong_profile ORDER BY age; \ No newline at end of file diff --git a/concourse/pipelines/templates/build_pipeline-tpl.yml b/concourse/pipelines/templates/build_pipeline-tpl.yml index e5775b519e..7b5706474e 100644 --- a/concourse/pipelines/templates/build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/build_pipeline-tpl.yml @@ -43,6 +43,7 @@ groups: - Test PXF-GP[[gp_ver]]-MINIO-NO-IMPERS on RHEL7 - Test PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-IMPERS on RHEL7 - Test PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-NO-IMPERS on RHEL7 + - Test PXF-GP[[gp_ver]]-HDP2-Upgrade-Extension on RHEL7 {% set gp_ver = None %} - Compatibility Gate for PXF-GP - Promote PXF-GP5 and PXF-GP6 Artifacts @@ -89,6 +90,7 @@ groups: - Test PXF-GP[[gp_ver]]-MINIO-NO-IMPERS on RHEL7 - Test PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-IMPERS on RHEL7 - Test 
PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-NO-IMPERS on RHEL7 + - Test PXF-GP[[gp_ver]]-HDP2-Upgrade-Extension on RHEL7 {% set gp_ver = None %} - name: Backwards Compatibility jobs: @@ -533,6 +535,15 @@ resources: json_key: ((concourse-gcs-resources-service-account-key)) regexp: ((ud/pxf/common/releng-drop-path))/gpdb6/pxf-gp6-5.(.*)-2.el7.x86_64.rpm +## ---------- PXF 6.6 (for GPDB 6) Artifact --------------- +# This is for extension upgrade testing. PXF 6.6.X contains extension version 2.0. PXF 6.7.+ contains version 2.1 +- name: pxf6_6_gp6_rhel7_released + type: gcs + source: + bucket: ((ud/pxf/prod/releng-drop-bucket-name)) + json_key: ((concourse-gcs-resources-service-account-key)) + regexp: ((ud/pxf/common/releng-drop-path))/gpdb6/pxf-gp6-(6.6.*)-2.el7.x86_64.rpm + ## ---------- PXF Build Artifacts ---------- {% set gp_ver = None %} @@ -978,6 +989,34 @@ jobs: HADOOP_CLIENT: CDH SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) +- name: Test PXF-GP[[gp_ver]]-HDP2-Upgrade-Extension on RHEL7 + plan: + - in_parallel: + - get: pxf_src + passed: [Testing Gate for PXF-GP] + trigger: true + - get: pxf_package + resource: pxf6_6_gp6_rhel7_released # for upgrade test + - get: pxf_tarball + resource: pxf_gp[[gp_ver]]_tarball_rhel7 + passed: [Testing Gate for PXF-GP] + - get: gpdb_package + resource: gpdb[[gp_ver]]_rhel7_rpm_latest-0 + passed: [Testing Gate for PXF-GP] + - get: gpdb[[gp_ver]]-pxf-dev-centos7-image + - get: pxf-automation-dependencies + - get: singlecluster + resource: singlecluster-hdp2 + - task: Test PXF-GP[[gp_ver]]-HDP2-New-Extension on RHEL7 + file: pxf_src/concourse/tasks/upgrade_extension.yml + image: gpdb[[gp_ver]]-pxf-dev-centos7-image + params: + ACCESS_KEY_ID: ((tf-machine-access-key-id)) + GP_VER: [[gp_ver]] + GROUP: pxfExtensionVersion2 + SECOND_GROUP: upgradePxfExtension + SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) + ## ---------- FILE tests ----------------- - name: Test PXF-GP[[gp_ver]]-FILE-NO-IMPERS on RHEL7 @@ -1512,6 +1551,7 @@ 
jobs: - Test PXF-GP[[gp_ver]]-MINIO-NO-IMPERS on RHEL7 - Test PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-IMPERS on RHEL7 - Test PXF-GP[[gp_ver]]-HDP2-SECURE-MULTI-NO-IMPERS on RHEL7 + - Test PXF-GP[[gp_ver]]-HDP2-Upgrade-Extension on RHEL7 trigger: true {% set gp_ver = None %} {% set gp_ver = 5 %} diff --git a/concourse/scripts/cli/test_reset_init.sh b/concourse/scripts/cli/test_reset_init.sh index dd9dfcd8f6..1ba0531999 100755 --- a/concourse/scripts/cli/test_reset_init.sh +++ b/concourse/scripts/cli/test_reset_init.sh @@ -94,7 +94,7 @@ PXF initialized successfully on ${num_hosts} out of ${num_hosts} hosts" control_file_content=\ "directory = '/usr/local/pxf-gp6/gpextable/' -default_version = '2.0' +default_version = '2.1' comment = 'Extension which allows to access unmanaged data' module_pathname = '/usr/local/pxf-gp6/gpextable/pxf' superuser = true diff --git a/concourse/scripts/pxf_common.bash b/concourse/scripts/pxf_common.bash index 7764bf5f4d..1db48a4624 100755 --- a/concourse/scripts/pxf_common.bash +++ b/concourse/scripts/pxf_common.bash @@ -84,6 +84,66 @@ function set_env() { export TIMEFORMAT=$'\e[4;33mIt took %R seconds to complete this step\e[0m'; } +function run_pxf_automation() { + # Let's make sure that automation/singlecluster directories are writeable + chmod a+w pxf_src/automation /singlecluster || true + find pxf_src/automation/tinc* -type d -exec chmod a+w {} \; + + local extension_name="pxf" + if [[ ${USE_FDW} == "true" ]]; then + extension_name="pxf_fdw" + fi + + #TODO: remove once exttable tests with GP7 are set + if [[ ${GROUP} == fdw_gpdb_schedule ]]; then + extension_name="pxf_fdw" + fi + + su gpadmin -c " + source '${GPHOME}/greenplum_path.sh' && + psql -p ${PGPORT} -d template1 -c 'CREATE EXTENSION IF NOT EXISTS ${extension_name}' + " + # prepare certification output directory + mkdir -p certification + chmod a+w certification + + cat > ~gpadmin/run_pxf_automation_test.sh <<-EOF + #!/usr/bin/env bash + set -exo pipefail + + source 
~gpadmin/.pxfrc + + export PATH=\$PATH:${GPHD_ROOT}/bin + export GPHD_ROOT=${GPHD_ROOT} + export PXF_HOME=${PXF_HOME} + export PGPORT=${PGPORT} + export USE_FDW=${USE_FDW} + + cd pxf_src/automation + time make GROUP=${GROUP} test + + # if the test is successful, create certification file + gpdb_build_from_sql=\$(psql -c 'select version()' | grep Greenplum | cut -d ' ' -f 6,8) + gpdb_build_clean=\${gpdb_build_from_sql%)} + pxf_version=\$(< ${PXF_HOME}/version) + echo "GPDB-\${gpdb_build_clean/ commit:/-}-PXF-\${pxf_version}" > "${PWD}/certification/certification.txt" + echo + echo '****************************************************************************************************' + echo "Wrote certification : \$(< ${PWD}/certification/certification.txt)" + echo '****************************************************************************************************' + EOF + + chown gpadmin:gpadmin ~gpadmin/run_pxf_automation_test.sh + chmod a+x ~gpadmin/run_pxf_automation_test.sh + + if [[ ${ACCEPTANCE} == true ]]; then + echo 'Acceptance test pipeline' + exit 1 + fi + + su gpadmin -c ~gpadmin/run_pxf_automation_test.sh +} + function run_regression_test() { ln -s "${PWD}/gpdb_src" ~gpadmin/gpdb_src cat > ~gpadmin/run_regression_test.sh <<-EOF @@ -392,6 +452,19 @@ function setup_impersonation() { fi } +function setup_hadoop() { + local hdfsrepo=$1 + + [[ -z ${GROUP} ]] && return 0 + + export SLAVES=1 + setup_impersonation "${hdfsrepo}" + if grep 'hadoop-3' "${hdfsrepo}/versions.txt"; then + adjust_for_hadoop3 "${hdfsrepo}" + fi + start_hadoop_services "${hdfsrepo}" +} + function adjust_for_hadoop3() { local GPHD_ROOT=${1} diff --git a/concourse/scripts/test.bash b/concourse/scripts/test.bash index 15856aadce..227a893298 100755 --- a/concourse/scripts/test.bash +++ b/concourse/scripts/test.bash @@ -70,65 +70,6 @@ function run_pg_regress() { su gpadmin -c ~gpadmin/run_pxf_automation_test.sh } -function run_pxf_automation() { - # Let's make sure that 
automation/singlecluster directories are writeable - chmod a+w pxf_src/automation /singlecluster || true - find pxf_src/automation/tinc* -type d -exec chmod a+w {} \; - - local extension_name="pxf" - if [[ ${USE_FDW} == "true" ]]; then - extension_name="pxf_fdw" - fi - - #TODO: remove once exttable tests with GP7 are set - if [[ ${GROUP} == fdw_gpdb_schedule ]]; then - extension_name="pxf_fdw" - fi - - su gpadmin -c " - source '${GPHOME}/greenplum_path.sh' && - psql -p ${PGPORT} -d template1 -c 'CREATE EXTENSION ${extension_name}' - " - # prepare certification output directory - mkdir -p certification - chmod a+w certification - - cat > ~gpadmin/run_pxf_automation_test.sh <<-EOF - #!/usr/bin/env bash - set -exo pipefail - - source ~gpadmin/.pxfrc - - export PATH=\$PATH:${GPHD_ROOT}/bin - export GPHD_ROOT=${GPHD_ROOT} - export PXF_HOME=${PXF_HOME} - export PGPORT=${PGPORT} - export USE_FDW=${USE_FDW} - - cd pxf_src/automation - time make GROUP=${GROUP} test - - # if the test is successful, create certification file - gpdb_build_from_sql=\$(psql -c 'select version()' | grep Greenplum | cut -d ' ' -f 6,8) - gpdb_build_clean=\${gpdb_build_from_sql%)} - pxf_version=\$(< ${PXF_HOME}/version) - echo "GPDB-\${gpdb_build_clean/ commit:/-}-PXF-\${pxf_version}" > "${PWD}/certification/certification.txt" - echo - echo '****************************************************************************************************' - echo "Wrote certification : \$(< ${PWD}/certification/certification.txt)" - echo '****************************************************************************************************' - EOF - - chown gpadmin:gpadmin ~gpadmin/run_pxf_automation_test.sh - chmod a+x ~gpadmin/run_pxf_automation_test.sh - - if [[ ${ACCEPTANCE} == true ]]; then - echo 'Acceptance test pipeline' - exit 1 - fi - - su gpadmin -c ~gpadmin/run_pxf_automation_test.sh -} function generate_extras_fat_jar() { mkdir -p /tmp/fatjar @@ -248,7 +189,7 @@ function _main() { fi # Certification jobs 
might install non-latest PXF, make sure automation code corresponds to what is installed - if [[ -f ${PXF_HOME}/commit.sha ]]; then + if [[ -f ${PXF_HOME}/commit.sha ]] && [[ ${ADJUST_AUTOMATION} != false ]]; then adjust_automation_code else echo "WARNING: no commit.sha file is found in PXF_HOME=${PXF_HOME}" diff --git a/concourse/scripts/test_pxf.bash b/concourse/scripts/test_pxf.bash index 518792a60c..1ed2edbe3f 100755 --- a/concourse/scripts/test_pxf.bash +++ b/concourse/scripts/test_pxf.bash @@ -125,19 +125,6 @@ function configure_mapr_dependencies() { sed -i 's|8020|7222|' pxf_src/automation/src/test/resources/sut/default.xml } -function setup_hadoop() { - local hdfsrepo=$1 - - [[ -z ${GROUP} ]] && return 0 - - export SLAVES=1 - setup_impersonation "${hdfsrepo}" - if grep 'hadoop-3' "${hdfsrepo}/versions.txt"; then - adjust_for_hadoop3 "${hdfsrepo}" - fi - start_hadoop_services "${hdfsrepo}" -} - function configure_sut() { AMBARI_DIR=$(find /tmp/build/ -name ambari_env_files) if [[ -n $AMBARI_DIR ]]; then diff --git a/concourse/scripts/test_upgrade_extension.bash b/concourse/scripts/test_upgrade_extension.bash new file mode 100755 index 0000000000..f94b733651 --- /dev/null +++ b/concourse/scripts/test_upgrade_extension.bash @@ -0,0 +1,96 @@ +#!/bin/bash + +set -exo pipefail + +CWDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# make sure GP_VER is set so that we know what PXF_HOME will be +: "${GP_VER:?GP_VER must be set}" +# We run 2 sets of automation tests, once before upgrading and once after +# make sure that SECOND_GROUP is set so that we actually have a set of tests +# to run against after we upgrade PXF. 
+: "${SECOND_GROUP:?SECOND_GROUP must be set}" + +# set our own GPHOME for RPM-based installs before sourcing common script +export GPHOME=/usr/local/greenplum-db +export PXF_HOME=/usr/local/pxf-gp${GP_VER} +export PXF_BASE_DIR=${PXF_BASE_DIR:-$PXF_HOME} + +source "${CWDIR}/pxf_common.bash" + +export GOOGLE_PROJECT_ID=${GOOGLE_PROJECT_ID:-data-gpdb-ud} +export JAVA_TOOL_OPTIONS=-Dfile.encoding=UTF8 +export HADOOP_HEAPSIZE=512 +export YARN_HEAPSIZE=512 +export GPHD_ROOT=/singlecluster +export PGPORT=${PGPORT:-5432} + +function upgrade_pxf() { + existing_pxf_version=$(cat "${PXF_HOME}"/version) + echo "Stopping PXF ${existing_pxf_version}" + su gpadmin -c "${PXF_HOME}/bin/pxf version && ${PXF_HOME}/bin/pxf cluster stop" + + echo "Installing Newer Version of PXF 6" + install_pxf_tarball + + echo "Check the PXF 6 version" + su gpadmin -c "${PXF_HOME}/bin/pxf version" + + echo "Register the PXF extension into Greenplum" + su gpadmin -c "GPHOME=${GPHOME} ${PXF_HOME}/bin/pxf cluster register" + + if [[ "${PXF_BASE_DIR}" != "${PXF_HOME}" ]]; then + echo "Prepare PXF in ${PXF_BASE_DIR}" + PXF_BASE="${PXF_BASE_DIR}" "${PXF_HOME}"/bin/pxf cluster prepare + echo "export PXF_BASE=${PXF_BASE_DIR}" >> ~gpadmin/.bashrc + fi + updated_pxf_version=$(cat "${PXF_HOME}"/version) + + echo "Starting PXF ${updated_pxf_version}" + + if [[ "${existing_pxf_version}" > "${updated_pxf_version}" ]]; then + echo "Existing version of PXF (${existing_pxf_version}) is greater than or equal to the new version (${updated_pxf_version})" + fi + + su gpadmin -c "PXF_BASE=${PXF_BASE_DIR} ${PXF_HOME}/bin/pxf cluster start" + + # the new version of PXF brought in a new version of the extension. 
For databases that already had PXF installed, + # we need to explicitly upgrade the PXF extension to the new version + echo "ALTER EXTENSION pxf UPDATE - for multibyte delimiter tests" + + su gpadmin <<'EOSU' + source ${GPHOME}/greenplum_path.sh && + psql --no-align --tuples-only --command "SELECT datname FROM pg_catalog.pg_database WHERE datname != 'template0';" | while read -r dbname; do + echo -n "checking if database '${dbname}' has PXF extension installed... " + if ! psql --dbname="${dbname}" --no-align --tuples-only --command "SELECT extname FROM pg_catalog.pg_extension WHERE extname = 'pxf'" | grep . &>/dev/null; then + echo "skipping database '${dbname}'" + continue + fi + echo "updating PXF extension in database '${dbname}'" + psql --dbname="${dbname}" --set ON_ERROR_STOP=on --command "ALTER EXTENSION pxf UPDATE;" + done +EOSU + +} + +function _main() { + + # Upgrade to latest PXF + echo + echo + echo '****************************************************************************************************' + echo "* Upgrading PXF *" + echo '****************************************************************************************************' + echo + echo + + # Upgrade from older version of PXF to newer version of PXF present in the tarball + upgrade_pxf + + # Run tests after upgrading PXF + # second time running automation so we should be running the second group + GROUP=${SECOND_GROUP} + run_pxf_automation +} + +_main diff --git a/concourse/tasks/test.yml b/concourse/tasks/test.yml index af443573a1..b9d5c8ba5b 100644 --- a/concourse/tasks/test.yml +++ b/concourse/tasks/test.yml @@ -34,6 +34,7 @@ params: PROTOCOL: PG_REGRESS: USE_FDW: false + ADJUST_AUTOMATION: true run: path: pxf_src/concourse/scripts/test.bash diff --git a/concourse/tasks/test_certification.yml b/concourse/tasks/test_certification.yml index a6320d97fd..41c285ca35 100644 --- a/concourse/tasks/test_certification.yml +++ b/concourse/tasks/test_certification.yml @@ -26,6 +26,7 @@ params: 
RUN_JDK_VERSION: 8 PROTOCOL: PG_REGRESS: + ADJUST_AUTOMATION: true run: path: pxf_src/concourse/scripts/test.bash diff --git a/concourse/tasks/upgrade_extension.yml b/concourse/tasks/upgrade_extension.yml new file mode 100644 index 0000000000..89799feb3d --- /dev/null +++ b/concourse/tasks/upgrade_extension.yml @@ -0,0 +1,34 @@ +platform: linux + +image_resource: + type: registry-image + +inputs: + - name: pxf_package + - name: pxf_tarball + - name: pxf_src + - name: gpdb_package + - name: singlecluster + optional: true + - name: pxf-automation-dependencies + optional: true + +params: + GP_VER: + GROUP: pxfExtensionVersion2 + SECOND_GROUP: pxfExtensionVersion2_1 + HADOOP_CLIENT: HDP + IMPERSONATION: true + ADJUST_AUTOMATION: false + PGPORT: 5432 + PXF_BASE_DIR: + PROTOCOL: + USE_FDW: false + +run: + path: sh + args: + - -exc + - | + pxf_src/concourse/scripts/test.bash || exit 1 + pxf_src/concourse/scripts/test_upgrade_extension.bash diff --git a/external-table/Makefile b/external-table/Makefile index 7ed58bec57..626ced4d32 100644 --- a/external-table/Makefile +++ b/external-table/Makefile @@ -1,7 +1,7 @@ EXTENSION = pxf -DATA = pxf--2.0.sql pxf--1.0--2.0.sql pxf--1.0.sql +DATA = pxf--2.1.sql pxf--2.0--2.1.sql pxf--2.1--2.0.sql pxf--2.0.sql pxf--1.0--2.0.sql pxf--1.0.sql MODULE_big = pxf -OBJS = src/pxfprotocol.o src/pxfbridge.o src/pxfuriparser.o src/libchurl.o src/pxfutils.o src/pxfheaders.o src/gpdbwritableformatter.o src/pxffilters.o +OBJS = src/pxfprotocol.o src/pxfbridge.o src/pxfuriparser.o src/libchurl.o src/pxfutils.o src/pxfheaders.o src/gpdbwritableformatter.o src/pxfdelimited_formatter.o src/pxffilters.o REGRESS = setup pxf pxfinvalid SHLIB_LINK += -lcurl @@ -20,9 +20,7 @@ stage: pxf.so mkdir -p build/stage/gpextable install -c -m 755 pxf.so build/stage/gpextable/pxf.so install -c -m 644 pxf.control build/stage/gpextable/ - install -c -m 644 pxf--1.0.sql build/stage/gpextable/ - install -c -m 644 pxf--2.0.sql build/stage/gpextable/ - install -c -m 644
pxf--1.0--2.0.sql build/stage/gpextable/ + install -c -m 644 $(DATA) build/stage/gpextable/ @echo "gpdb.version=$(GP_VERSION)" > build/stage/gpextable/metadata @echo "gpdb.major-version=$(GP_MAJORVERSION)" >> build/stage/gpextable/metadata diff --git a/external-table/pxf--2.0--2.1.sql b/external-table/pxf--2.0--2.1.sql new file mode 100644 index 0000000000..4b5a36efef --- /dev/null +++ b/external-table/pxf--2.0--2.1.sql @@ -0,0 +1,7 @@ +------------------------------------------------------------------ +-- PXF Protocol/Formatters +------------------------------------------------------------------ + +CREATE OR REPLACE FUNCTION pg_catalog.pxfdelimited_import() RETURNS record +AS 'MODULE_PATHNAME', 'pxfdelimited_import' +LANGUAGE C STABLE; diff --git a/external-table/pxf--2.1--2.0.sql b/external-table/pxf--2.1--2.0.sql new file mode 100644 index 0000000000..130bfd8568 --- /dev/null +++ b/external-table/pxf--2.1--2.0.sql @@ -0,0 +1,9 @@ +------------------------------------------------------------------ +-- PXF Protocol/Formatters +------------------------------------------------------------------ + +-- remove the function from the extension +ALTER EXTENSION pxf DROP FUNCTION pg_catalog.pxfdelimited_import(); + +-- remove the function itself from the catalog +DROP FUNCTION pg_catalog.pxfdelimited_import(); diff --git a/external-table/pxf--2.1.sql b/external-table/pxf--2.1.sql new file mode 100644 index 0000000000..433e129324 --- /dev/null +++ b/external-table/pxf--2.1.sql @@ -0,0 +1,32 @@ +------------------------------------------------------------------ +-- PXF Protocol/Formatters +------------------------------------------------------------------ + +CREATE OR REPLACE FUNCTION pg_catalog.pxf_write() RETURNS integer +AS 'MODULE_PATHNAME', 'pxfprotocol_export' +LANGUAGE C STABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.pxf_read() RETURNS integer +AS 'MODULE_PATHNAME', 'pxfprotocol_import' +LANGUAGE C STABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.pxf_validate() 
RETURNS void +AS 'MODULE_PATHNAME', 'pxfprotocol_validate_urls' +LANGUAGE C STABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.pxfwritable_import() RETURNS record +AS 'MODULE_PATHNAME', 'gpdbwritableformatter_import' +LANGUAGE C STABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.pxfwritable_export(record) RETURNS bytea +AS 'MODULE_PATHNAME', 'gpdbwritableformatter_export' +LANGUAGE C STABLE; + +CREATE OR REPLACE FUNCTION pg_catalog.pxfdelimited_import() RETURNS record +AS 'MODULE_PATHNAME', 'pxfdelimited_import' +LANGUAGE C STABLE; + +CREATE TRUSTED PROTOCOL pxf ( + writefunc = pxf_write, + readfunc = pxf_read, + validatorfunc = pxf_validate); diff --git a/external-table/pxf.control b/external-table/pxf.control index da08dbc14c..ca0ae18a07 100644 --- a/external-table/pxf.control +++ b/external-table/pxf.control @@ -1,5 +1,5 @@ directory = 'extension' -default_version = '2.0' +default_version = '2.1' comment = 'Extension which allows to access unmanaged data' module_pathname = '$libdir/pxf' superuser = true diff --git a/external-table/src/pxfdelimited_formatter.c b/external-table/src/pxfdelimited_formatter.c new file mode 100644 index 0000000000..ecf9f52b1e --- /dev/null +++ b/external-table/src/pxfdelimited_formatter.c @@ -0,0 +1,719 @@ +// Portions Copyright (c) 2023 VMware, Inc. or its affiliates. + +#include "pxfdelimited_formatter.h" +#include "stdio.h" + +PG_FUNCTION_INFO_V1(pxfdelimited_import); +Datum pxfdelimited_import(PG_FUNCTION_ARGS); + +#define is_even(cnt) (cnt % 2 == 0) + +#if PG_VERSION_NUM < 90400 +// Copied from tupdesc.h (6.x), since this is not present in GPDB 5 +/* Accessor for the i'th attribute of tupdesc. 
*/ +#define TupleDescAttr(tupdesc, i) ((tupdesc)->attrs[(i)]) +#endif + +/** + * Helper function to count the number of occurrences of a given character (from right to left) given a pointer to the current location in the string + * and a pointer to the beginning of the string + * + * @param p where to start searching + * @param left_border where to stop searching + * @param val the char to count + * @return the number of continuous occurrences found of the given character + */ +static int +count_preceding_occurrences_of_char(char *p, char *left_border, char val) +{ + int count = 0; + while (p >= left_border && *p == val) + { + ++count; + --p; + } + return count; +} + +/** + * Find the first occurrence of the target given a pointer to the beginning of the string and a pointer to the end of the string + * @param target the character to find + * @param left_border where to start searching + * @param right_border where to stop searching + * @param myData struct containing formatter option information + * @return pointer to the character immediately after the first instance of the target + */ +static char * +find_first_ins_for_multiline(char *target, char *left_border, char *right_border, pxfdelimited_state *myData) +{ + char *t = target; + char *ret = NULL; + + // we assume that the target will only be delimiter or eol + if (myData->quote != NULL) + { + if (strcmp(target, myData->delimiter) == 0) + { + t = myData->quote_delimiter; + } + else + { + t = myData->quote_eol; + } + } + + char *start_pos = left_border; + while (1) + { + // find the first instance of the target value + char *p = strstr(start_pos, t); + if (p == NULL || p > right_border - strlen(t)) + { + // nothing found or the entire target is not within the bounds dictated + break; + } + + int escape_count = 0; + + // make sure that the value found is not an escaped representation of the given target + if (myData->escape != NULL) + { + escape_count = count_preceding_occurrences_of_char(p-1, left_border, 
*myData->escape); + } + + // if the count is even, then the value found is not escaped + if (is_even(escape_count)) + { + ret = p; + break; + } + else + { + // the value found was an escaped representation of the given target, thus continue searching + start_pos = p + strlen(t); + continue; + } + } + + // we found a 'quote+eol', 'ret' is pointing to quote. we should return the data border that just after the quote + if (myData->quote != NULL && strcmp(target, myData->eol) == 0 && ret) + { + ++ret; + } + + return ret; +} + +/** + * Set the values in the pxfdelimited_state struct with all our formatter options + * + * This function assumes that the values for delimiter, quote and escape are stored in the + * server encoding. It converts the values to the table encoding and writes it into the + * pxfdelimited_state struct + * + * @param fcinfo + * @param fmt_state + */ +static void +get_config(FunctionCallInfo fcinfo, pxfdelimited_state *fmt_state) +{ + fmt_state->delimiter = NULL; + fmt_state->eol = NULL; + fmt_state->quote = NULL; + fmt_state->escape = NULL; + + int nargs = FORMATTER_GET_NUM_ARGS(fcinfo); + + for (int i = 1; i <= nargs; i++) + { + char *key = FORMATTER_GET_NTH_ARG_KEY(fcinfo, i); + char *value = FORMATTER_GET_NTH_ARG_VAL(fcinfo, i); + + if (strcmp(key, "delimiter") == 0) + { + fmt_state->delimiter = value; + } + else if (strcmp(key, "newline") == 0) + { + if (pg_strcasecmp(value, "lf") == 0) + { + fmt_state->eol = "\n"; + } + else if (pg_strcasecmp(value, "cr") == 0) + { + fmt_state->eol = "\r"; + } + else if (pg_strcasecmp(value, "crlf") == 0) + { + fmt_state->eol = "\r\n"; + } + else + { + // GPDB COPY command allows for a NEWLINE option with the following 3 values: LF, CRLF or CR. + // When set, these values get interpolated correctly + // Emulate this behavior by only allowing these three values for the multibyte delimiter formatter + // Warning: this requires that the entire file has lines terminated in the same way. 
+ // (LF is used throughout the entire file) + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("NEWLINE can only be LF, CRLF, or CR"))); + } + } + else if (strcmp(key, "quote") == 0) + { + fmt_state->quote = value; + } + else if (strcmp(key, "escape") == 0) + { + fmt_state->escape = value; + } + } + + if (fmt_state->eol == NULL) + { + // while GPDB COPY framework has the ability to read the first row of data to dynamically determine + // the newline type, we cannot do that here. Instead, assume a default value of LF + fmt_state->eol = "\n"; + } + + // with quote, we must also have escape set it to the default if it is not provided. This is similar behavior to COPY + if (fmt_state->quote != NULL) + { + if (strlen(fmt_state->quote) != 1) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("quote must be a single one-byte character"))); + } + + if (fmt_state->escape == NULL) + { + // postgres defaults the escape value to be the same as quote if it is not set: https://www.postgresql.org/docs/9.4/sql-copy.html + fmt_state->escape = fmt_state->quote; + } + + fmt_state->quote_delimiter = palloc(strlen(fmt_state->delimiter) + 2); + sprintf(fmt_state->quote_delimiter, "%c%s", *fmt_state->quote, fmt_state->delimiter); + + fmt_state->quote_eol = palloc(strlen(fmt_state->eol) + 2); + sprintf(fmt_state->quote_eol, "%c%s", *fmt_state->quote, fmt_state->eol); + } + + if (fmt_state->delimiter == NULL || *fmt_state->delimiter == '\0') + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("missing delimiter option"), + errhint("Please specify a delimiter value in the table definition."))); + } + + if (fmt_state->escape != NULL && strlen(fmt_state->escape) != 1) + { + ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("escape must be a single one-byte character"))); + } +} + +/** + * Initialize the pxfdelimited_state struct + * @param fcinfo + * @return + */ +static pxfdelimited_state * +new_pxfdelimited_state(FunctionCallInfo fcinfo) +{ + 
pxfdelimited_state *fmt_state; + TupleDesc desc = FORMATTER_GET_TUPDESC(fcinfo); + + fmt_state = (pxfdelimited_state*)palloc0(sizeof(pxfdelimited_state)); + fmt_state->desc = desc; + + int nColumns = desc->natts; + fmt_state->values = (Datum*)palloc(sizeof(Datum) * nColumns); + fmt_state->nulls = (bool*)palloc(sizeof(bool) * nColumns); + + fmt_state->conv_functions = FORMATTER_GET_CONVERSION_FUNCS(fcinfo); + fmt_state->typioparams = FORMATTER_GET_TYPIOPARAMS(fcinfo); + + get_config(fcinfo, fmt_state); + + fmt_state->nColumns = nColumns; + + fmt_state->external_encoding = FORMATTER_GET_EXTENCODING(fcinfo); + fmt_state->enc_conversion_proc = ((FormatterData*) fcinfo->context)->fmt_conversion_proc; + + fmt_state->saw_delim = false; + fmt_state->saw_eol = false; + return fmt_state; +} + +/** + * Helper function to handle any escaping that needs to be done. We only call this function + * on the column data itself + * @param start pointer to the beginning of the buffer + * @param len total length of the buffer + * @param myData struct containing formatter options + * @return a new buffer containing a copy of the string that has been properly unescaped + */ +static char * +unescape_data(char *start, int len, pxfdelimited_state *myData) +{ + char *buf = palloc(len + 1); + int j = 0; + int eol_len = strlen(myData->eol); + int delimiter_len = strlen(myData->delimiter); + + for (int i = 0; i < len;) + { + if (start[i] == *myData->escape) + { + // when the data is quoted, we do not worry about special characters + // so we only need to unescape 'escape' itself and the quote value + // the examples below assume escape=\ and quote=" and delimiter=, + if (myData->quote != NULL) + { + // before: `\"hello, my name is jane\" she said. let's escape something \\` + // after: `"hello, my name is jane" she said. 
let's escape something \` + if (i + 1 < len && (start[i+1] == *myData->escape || start[i+1] == *myData->quote)) + { + buf[j++] = start[i+1]; + i = i + 2; + } + else // before: \a, after: \a + { + buf[j++] = start[i++]; + } + } + // if the data is not quoted, then we need to handle delimiter, eol and `escape` itself + else + { + if (i + 1 < len && start[i+1] == *myData->escape) + { + buf[j++] = start[i+1]; + i = i + 2; + } + // before: `the new line character is \\n` + // after: `the new line character is \n` + else if (i + eol_len < len && memcmp(myData->eol, start + i + 1, eol_len) == 0 ) + { + memcpy(buf + j, myData->eol, eol_len); + i = i + eol_len + 1; + j += eol_len; + } + // any delimiter value here is part of the data itself and was escaped so unescape them here + // before: `hi\, this is jane` + // after: `hi, this is jane` + else if (i + delimiter_len < len && memcmp(myData->delimiter, start + i + 1, delimiter_len) == 0) + { + memcpy(buf + j, myData->delimiter, delimiter_len); + i = i + delimiter_len + 1; + j += delimiter_len; + } + else // we permit this, escape nothing + { + buf[j++] = start[i++]; + } + } + } + // the data can only start with a quote character if myData->quote == NULL + else if (myData->quote != NULL && start[i] == *myData->quote) + { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("Found an unescaped quote character"), + errhint("Is the `escape` value in the format options set correctly?"))); + } + else + { + buf[j++] = start[i++]; + } + } + + buf[j] = 0; + + return buf; +} + +/** + * we count the quote, we need every column with two quote, return the pos of eol + * This function ensures that the data in the buffer is indeed a complete row that can be parsed. 
+ * This function is only called when there is a quote value + * @param data the pointer to the start of the buffer + * @param data_border the pointer to where the line/row should end + * @param myData the struct containing formatter options + * @return + */ +static char * +find_whole_line(char *data, char *data_border, pxfdelimited_state *myData) { + int column_cnt = myData->desc->natts; + int delimiter_len = strlen(myData->delimiter); + int eol_len = strlen(myData->eol); + + char *p = data; + for (int i = 0; i < column_cnt; ++i) + { + // first, we check the left quote + // if there is no left quote, then there is something wrong with the data + if (*p != *myData->quote) + { + return NULL; + } + + ++p; + while (1) + { + // read until we see a quote value + while (p < data_border && *p != *myData->quote) + { + ++p; + } + + // if we didn't find the right quote in the buf + if (p >= data_border) + { + return NULL; + } + + // make sure that the quote we found is not escaped + if (myData->escape != NULL) + { + int cnt = count_preceding_occurrences_of_char(p-1, data, *myData->escape); + // if the count is odd, then the quote found is escaped so continue until we find the next one + if (!is_even(cnt)) + { + ++p; + continue; + } + } + + break; + } + + // we needn't check delimiter after the last column + if (i == column_cnt - 1) + { + break; + } + + // here should be a delimiter + ++p; + if (p > data_border - delimiter_len || + (p <= data_border - delimiter_len && memcmp(p, myData->delimiter, delimiter_len) != 0) ) + { + return NULL; + } + p += delimiter_len; + } + + // we need an eol except that here is the end of buf where no need an eol + ++p; + if (p > data_border - eol_len || + (p <= data_border - eol_len && memcmp(p, myData->eol, eol_len) != 0) ) + { + return NULL; + } + return p; +} + +/** + * Given a pointer to the beginning of a buffer and a length, parse the buffer into individual columns + * @param data the pointer to the start of the buffer + * @param len 
total length of the buffer + * @param myData struct containing formatter options. it is also where the parsed data will go + */ +void +unpack_delimited(char *data, int len, pxfdelimited_state *myData) +{ + char *start = (char*)data; + char *location = (char*)data; + char *end = (char*)data; + StringInfo buf = makeStringInfo(); + int index = 0; + int delimiter_len = strlen(myData->delimiter); + int two_quote_len = (myData->quote != NULL ? 2 : 0); // the last quote of this column and the first quote of next column + + char* quote_hint_msg = "Please verify that columns in the data are properly quoted."; + char* col_hint_msg = "Please verify the number of columns in the table definition."; + + if (myData->quote != NULL) + { + if (*data != *myData->quote || data[len-1] != *myData->quote) + { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("Missing quote in row head or tail"), + errhint("%s", quote_hint_msg))); + } + + // exclude the first and the last quote + ++start; + --len; + } + + while ((end - (char*)data) < len) + { + resetStringInfo(buf); + end = (char*)data + len; + if (index >= myData->nColumns) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Expected %d columns but found %d in the row", myData->nColumns, index), + errhint("%s", col_hint_msg))); + } + if (start == NULL) { + ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("Unexpected null value found while trying to read data"), + errhint("%s", col_hint_msg))); + } + + if (myData->quote != NULL && *(start-1) != *myData->quote) + { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("Missing quote before some column"), + errhint("%s", quote_hint_msg))); + } + + location = find_first_ins_for_multiline(myData->delimiter, start, data + len, myData); + + if (location != NULL && location < end) + { + end = location; + myData->saw_delim = true; + } + int column_len = end - start; + if (column_len == 0) + { + myData->nulls[index] = true; + } + else + { + if 
(myData->escape == NULL) + { + appendBinaryStringInfo(buf, start, column_len); + } + else // unescape the data before adding the value + { + char *removeEscapeBuf = unescape_data(start, column_len, myData); + appendBinaryStringInfo(buf, removeEscapeBuf, strlen(removeEscapeBuf)); + pfree(removeEscapeBuf); + } + + myData->values[index] = InputFunctionCall(&myData->conv_functions[index], + buf->data, myData->typioparams[index], TupleDescAttr(myData->desc, index)->atttypmod); + myData->nulls[index] = false; + } + index++; + + start = location + delimiter_len + two_quote_len; + } + if (index < myData->nColumns) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Expected %d columns in row but found %d", myData->nColumns, index), + myData->saw_delim ? errhint("%s", col_hint_msg) + : errhint("Is the `delimiter` value in the format options set correctly?"))); + } +} + +/** + * Main formatter function. + * This function hooks into the GPDB framework + * in order to format the incoming data. 
+ * @return + */ +Datum +pxfdelimited_import(PG_FUNCTION_ARGS) +{ + HeapTuple tuple; + TupleDesc tupdesc; + MemoryContext m, oldcontext; + pxfdelimited_state *myData; + char *data_buf; + int ncolumns = 0; + int data_cur; + int data_len; + + /* Must be called via the external table format manager */ + if (!CALLED_AS_FORMATTER(fcinfo)) + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("cannot execute pxfdelimited_import outside format manager"))); + + tupdesc = FORMATTER_GET_TUPDESC(fcinfo); + + /* Get our internal description of the formatter */ + ncolumns = tupdesc->natts; + myData = (pxfdelimited_state *) FORMATTER_GET_USER_CTX(fcinfo); + + /* + * Initialize the context structure + */ + if (myData == NULL) + { + myData = new_pxfdelimited_state(fcinfo); + FORMATTER_SET_USER_CTX(fcinfo, myData); + } + + if (myData->desc->natts != ncolumns) + elog(ERROR, "Unexpected change of output record type, expected %d but found %d columns", myData->desc->natts, ncolumns); + + /* get our input data buf and number of valid bytes in it */ + data_buf = FORMATTER_GET_DATABUF(fcinfo); + data_len = FORMATTER_GET_DATALEN(fcinfo); + data_cur = FORMATTER_GET_DATACURSOR(fcinfo); + + int remaining = data_len - data_cur; + + /* + * NOTE: Unexpected EOF Error Handling + * + * The first time we noticed an unexpected EOF, we'll set the data cursor + * forward and then raise the error. But then, the framework will still + * call the formatter the function again. Now, the formatter function will + * be provided with a zero length data buffer. In this case, we should not + * raise an error again, but simply return "NEED MORE DATA". This is how + * the formatter framework works. 
+ */ + if (remaining ==0 && FORMATTER_GET_SAW_EOF(fcinfo)) + FORMATTER_RETURN_NOTIFICATION(fcinfo, FMT_NEED_MORE_DATA); + + if (FORMATTER_GET_SAW_EOF(fcinfo)) + { + // when the quote value is set, we expect all the columns to be quoted and there + // to be no extraneous characters between the quote value and the delimiter value + // or the quote value and the eol value + // it's possible that we read the entire file but did not find the expected `quote + eol` or + // `quote + delimiter` values. Throw an appropriate error in such cases + if (remaining != 0 && myData->quote != NULL) + { + if (!myData->saw_eol || (!myData->saw_delim && ncolumns > 1)) + { + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("Did not find expected `%s` character when `quote` value was provided", myData->saw_eol ? "delimiter" : "newline"), + errhint("Check the format options in the table definition. " + "Additionally, make sure there are no extraneous characters between the `quote` and `%s` values in the data.", myData->saw_eol ? 
"delimiter" : "newline"))); + } + } + else + { + // otherwise, the EOF found is indeed unexpected + FORMATTER_SET_BAD_ROW_DATA(fcinfo, data_buf + data_cur, remaining); + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("unexpected end of file (multibyte case)"))); + } + } + + /* start clean */ + MemSet(myData->values, 0, ncolumns * sizeof(Datum)); + MemSet(myData->nulls, true, ncolumns * sizeof(bool)); + /* + * tuple data extraction is done in a separate memory context + */ + m = FORMATTER_GET_PER_ROW_MEM_CTX(fcinfo); + oldcontext = MemoryContextSwitchTo(m); + + FORMATTER_SET_DATACURSOR(fcinfo, data_cur); + + /* + * find the first instance of the `eol` character to get an entire row + */ + char *line_border = NULL; + line_border = find_first_ins_for_multiline(myData->eol, data_buf + data_cur, data_buf + data_len, myData); + if (line_border == NULL) + { + MemoryContextSwitchTo(oldcontext); + FORMATTER_RETURN_NOTIFICATION(fcinfo, FMT_NEED_MORE_DATA); + } + myData->saw_eol = true; + int eol_len = strlen(myData->eol); // if we are handling the last line, perhaps there is no eol + int delimiter_len = strlen(myData->delimiter); + int whole_line_len = line_border - data_buf - data_cur + eol_len; // we count the eol_len; + + /* + * If we have quote = `"` and delimiter = `;` + * when we found a quote+eol like `"\n`, there are 2 possibilities: + * 1. We are inside a quoted string that contains the eol value + * a) part of a value in the middle of the line ex: `" ";"\n ";" "\n` + * b) at the front of the line ex: `"\n ";" ";" "\n`. + * 2. We have found the true eol ex: `" ";" ";" "\n` + * + * As such, we need to make sure that we correctly find the eol marker and are not taking + * any value inside quotes as the eol + * + * The two cases that we need to pay special attention to is option 1b and option 2. 
+ * in the former situation, `"\n` is in the beginning + * in the latter situation, there must be a delimiter like `;` before `"\n` + */ + if (myData->quote != NULL && + (line_border == data_buf + data_cur + 1 || memcmp(line_border - 1 - delimiter_len, myData->delimiter, delimiter_len) == 0) ) + { + /* + * Go through the data_buf and ensure that we have the correct number of quotes and handle + */ + char *real_line_border = find_whole_line(data_buf + data_cur, data_buf + data_len, myData); + + // if we can't find a whole line by counting quote, we treat this part of data as bad data + if (real_line_border == NULL) + { + // the eol we saw was not a true eol + myData->saw_eol = false; + MemoryContextSwitchTo(oldcontext); + FORMATTER_SET_BAD_ROW_DATA(fcinfo, data_buf + data_cur, whole_line_len); + ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("Unable to find a row of data"))); + } + else + { + line_border = real_line_border; + whole_line_len = line_border - data_buf - data_cur + eol_len; // line_border changed + } + } + + PG_TRY(); + { + // Convert input data encoding to server encoding + char *encoded = data_buf + data_cur; + int len = whole_line_len - eol_len; + + if (myData->external_encoding != GetDatabaseEncoding()) + { + /* + * We do the encoding here instead of earlier since eol can only be LF, CR or CRLF which is the same regardless of encoding type + * We also do not need to worry about quote as it is currently limited to single one-byte character. As far as we know, + * there are no mappings between encodings that change the byte value of a one-byte character to a different byte value. 
+ * Most such mappings would be similar to that of LATIN1 to UTF-8 that changes one-byte character to 2-byte character, ex: + * LATIN1 | UTF8 + * --------+--------- + * 0xA4 | 0xC2A4 + * 0xE1 | 0xC3A1 + * + * However, as the delimiter can be different values, we need to properly encode the data before unpacking the values + */ + encoded = pg_custom_to_server(data_buf + data_cur, + whole_line_len - eol_len, + myData->external_encoding, + myData->enc_conversion_proc); + len = strlen(encoded); + // Get a complete message, unpack to myData->values and myData->nulls + unpack_delimited(encoded, len, myData); + + // Make sure the conversion actually happened. + if (encoded != data_buf + data_cur) + { + // Memory needs to be released after encoding conversion. + pfree(encoded); + } + } + else + { + // Get a complete message, unpack to myData->values and myData->nulls + unpack_delimited(data_buf + data_cur, whole_line_len - eol_len, myData); + } + } + PG_CATCH(); + { + MemoryContextSwitchTo(oldcontext); + + FORMATTER_SET_BAD_ROW_DATA(fcinfo, data_buf + data_cur, whole_line_len); + + PG_RE_THROW(); + } + PG_END_TRY(); + //data buffer contains a complete message, set the formatter databuf cursor + FORMATTER_SET_DATACURSOR(fcinfo, data_cur + whole_line_len); + /* ======================================================================= */ + MemoryContextSwitchTo(oldcontext); + tuple = heap_form_tuple(tupdesc, myData->values, myData->nulls); + FORMATTER_SET_TUPLE(fcinfo, tuple); + FORMATTER_RETURN_TUPLE(tuple); +} diff --git a/external-table/src/pxfdelimited_formatter.h b/external-table/src/pxfdelimited_formatter.h new file mode 100644 index 0000000000..4d308ad2b0 --- /dev/null +++ b/external-table/src/pxfdelimited_formatter.h @@ -0,0 +1,45 @@ +// Portions Copyright (c) 2023 VMware, Inc. or its affiliates. 
+ +#ifndef PXFDELIMITED_FORMATTER_H +#define PXFDELIMITED_FORMATTER_H + +#include "postgres.h" + +#include "fmgr.h" +#include "funcapi.h" + +#include "access/formatter.h" +#include "catalog/pg_proc.h" +#include "utils/builtins.h" +#include "utils/memutils.h" +#include "utils/typcache.h" +#include "utils/syscache.h" +#include "utils/datetime.h" + +#include "lib/stringinfo.h" + +typedef struct { + TupleDesc desc; + Datum *values; + bool *nulls; + FmgrInfo *conv_functions; + Oid *typioparams; + char *delimiter; + char *eol; + char *quote; + char *escape; + char *quote_delimiter; /* only for searching for border, not in the config file */ + char *quote_eol; /* only for searching for border, not in the config file */ + int nColumns; + + int external_encoding; /* remote side's character encoding */ + FmgrInfo *enc_conversion_proc; /* conv proc from exttbl encoding to + server or the other way around */ + bool saw_delim; + bool saw_eol; +} pxfdelimited_state; + +extern void +unpack_delimited(char *data, int len, pxfdelimited_state *myData); + +#endif diff --git a/external-table/src/pxfheaders.c b/external-table/src/pxfheaders.c index 57efb49005..90db87019c 100644 --- a/external-table/src/pxfheaders.c +++ b/external-table/src/pxfheaders.c @@ -37,6 +37,8 @@ static void add_alignment_size_httpheader(CHURL_HEADERS headers); static void add_tuple_desc_httpheader(CHURL_HEADERS headers, Relation rel); static void add_location_options_httpheader(CHURL_HEADERS headers, GPHDUri *gphduri); static char *get_format_name(ExtTableEntry *exttbl); +static char *getFormatterString(ExtTableEntry *exttbl); +static bool isFormatterPxfDelimited(ExtTableEntry *exttbl); static bool isFormatterPxfWritable(ExtTableEntry *exttbl); static void add_projection_desc_httpheaders(CHURL_HEADERS headers, ProjectionInfo *projInfo, List *qualsAttributes, Relation rel); static bool add_attnums_from_targetList(Node *node, List *attnums); @@ -90,6 +92,20 @@ build_http_headers(PxfInputData *input) ListCell 
*option; List *copyFmtOpts = NIL; + // in the case of PxfDelimitedFormatter formatter, the only viable profiles are *:text and *:csv. + // error out early here if the profile is not accepted + if (getFormatterString(exttbl) && // if the formatter string is non empty + isFormatterPxfDelimited(exttbl) && // and the formatter is PxfDelimitedFormatter + (!input->gphduri->profile || // if the profile is empty OR + (!strstr(input->gphduri->profile, ":text") && // the profile is neither text + !strstr(input->gphduri->profile, ":csv")))) // nor csv + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("The \"%s\" formatter only works with *:text or *:csv profiles.", PxfDelimitedFormatter), + errhint("Please double check the profile option in the external table definition."))); + } + /* pxf treats everything but pxfwritable_[import|export] as TEXT (even CSV) */ char *format = get_format_name(exttbl); @@ -655,10 +671,12 @@ get_format_name(ExtTableEntry *exttbl) } /* - * Checks if the custom formatter specified for the table starts with pxfwritable_ prefix + * Returns the name of the formatter (GP7) + * Returns the string containing all format options information (GP6 & GP5) + * Returns NULL otherwise */ -static bool -isFormatterPxfWritable(ExtTableEntry *exttbl) +static char* +getFormatterString(ExtTableEntry *exttbl) { char *formatterNameSearchString = NULL; @@ -673,11 +691,39 @@ isFormatterPxfWritable(ExtTableEntry *exttbl) } } #else - // we only have a serialized string of formatter options, parsing it requires porting a lot of code from GP6 - // instead, we will only search for occurrence of the "pxfwritable_" prefix in the whole string + // for GP5 and GP6, we only have a serialized string of formatter options, + // parsing it requires porting a lot of code from GP6 so return the entire serialized string formatterNameSearchString = exttbl->fmtopts; #endif + return formatterNameSearchString; +} + +/* + * Checks if the custom formatter specified for the 
table is pxfdelimited_import + */ +static bool +isFormatterPxfDelimited(ExtTableEntry *exttbl) +{ + char *formatterNameSearchString = getFormatterString(exttbl); + + if (!formatterNameSearchString || !strlen(formatterNameSearchString)) + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("cannot determine the name of a custom formatter"))); + } + + return strstr(formatterNameSearchString, PxfDelimitedFormatter) != NULL; +} +/* + * Checks if the custom formatter specified for the table starts with pxfwritable_ prefix + */ +static bool +isFormatterPxfWritable(ExtTableEntry *exttbl) +{ + char *formatterNameSearchString = getFormatterString(exttbl); + if (!formatterNameSearchString || !strlen(formatterNameSearchString)) { ereport(ERROR, diff --git a/external-table/src/pxfheaders.h b/external-table/src/pxfheaders.h index 2650c7a0d1..9c864f1266 100644 --- a/external-table/src/pxfheaders.h +++ b/external-table/src/pxfheaders.h @@ -23,6 +23,7 @@ #define GpdbWritableFormatName "GPDBWritable" #define TextFormatName "TEXT" #define PXFWritableFormatterPrefix "pxfwritable_" +#define PxfDelimitedFormatter "pxfdelimited_import" #include "libchurl.h" #include "pxfuriparser.h" diff --git a/server/pxf-service/src/scripts/pxf-post-gpupgrade b/server/pxf-service/src/scripts/pxf-post-gpupgrade index 1b0e8eca88..c1ca755240 100755 --- a/server/pxf-service/src/scripts/pxf-post-gpupgrade +++ b/server/pxf-service/src/scripts/pxf-post-gpupgrade @@ -47,8 +47,9 @@ EOF pxf_gpdb_major_version="$(awk 'BEGIN { FS = "=" } /gpdb.major-version/{ print $2 }' "${PXF_HOME}/gpextable/metadata")" gp_version="$(psql --no-align --tuples-only --command 'SELECT substring(version(), $$.*Greenplum Database (.*) build.*$$)')" +pxf_version="$(cat "${PXF_HOME}"/version)" -echo "PXF compiled against GPDB major version '${pxf_gpdb_major_version}'" >>"${log_file}" +echo "PXF ${pxf_version} compiled against GPDB major version '${pxf_gpdb_major_version}'" >>"${log_file}" echo "Running GPDB cluster is 
version '${gp_version}'" >>"${log_file}" if [[ "${pxf_gpdb_major_version}" != "${gp_version%%.*}" ]]; then @@ -113,6 +114,18 @@ psql --no-align --tuples-only --command "SELECT datname FROM pg_catalog.pg_datab AS '${PXF_HOME}/gpextable/pxf', 'gpdbwritableformatter_export' LANGUAGE C STABLE; END_OF_SQL + + # PXF extension 2.1 introduces a new function, only update the function if it exists + # bash and POSIX sh don't support decimals in numbers, however we are doing a string comparison + # on the version here so let us ignore the shellcheck error + # shellcheck disable=SC2072 + if [[ $(psql --dbname="${dbname}" --no-align --tuples-only --command "SELECT extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'") > 2.0 ]]; then + psql --dbname="${dbname}" --set ON_ERROR_STOP=on &>>"${log_file}" <<-END_OF_SQL + CREATE OR REPLACE FUNCTION pg_catalog.pxfdelimited_import() RETURNS record + AS '${PXF_HOME}/gpextable/pxf', 'pxfdelimited_import' + LANGUAGE C STABLE; + END_OF_SQL + fi done echo "Success" | tee -a "${log_file}" diff --git a/server/pxf-service/src/scripts/pxf-pre-gpupgrade b/server/pxf-service/src/scripts/pxf-pre-gpupgrade index 80fec42f5e..e9aa2b88ca 100755 --- a/server/pxf-service/src/scripts/pxf-pre-gpupgrade +++ b/server/pxf-service/src/scripts/pxf-pre-gpupgrade @@ -47,8 +47,9 @@ EOF pxf_gpdb_major_version="$(awk 'BEGIN { FS = "=" } /gpdb.major-version/{ print $2 }' "${PXF_HOME}/gpextable/metadata")" gp_version="$(psql --no-align --tuples-only --command 'SELECT substring(version(), $$.*Greenplum Database (.*) build.*$$)')" +pxf_version="$(cat "${PXF_HOME}"/version)" -echo "PXF compiled against GPDB major version '${pxf_gpdb_major_version}'" >>"${log_file}" +echo "PXF ${pxf_version} compiled against GPDB major version '${pxf_gpdb_major_version}'" >>"${log_file}" echo "Running GPDB cluster is version '${gp_version}'" >>"${log_file}" if [[ "${pxf_gpdb_major_version}" != "${gp_version%%.*}" ]]; then @@ -113,6 +114,18 @@ psql --no-align --tuples-only 
--command "SELECT datname FROM pg_catalog.pg_datab AS 'pxf', 'gpdbwritableformatter_export' LANGUAGE C STABLE; END_OF_SQL + + # PXF extension 2.1 introduces a new function, only update the function if it exists + # bash and POSIX sh don't support decimals in numbers, however we are doing a string comparison + # on the version here so let us ignore the shellcheck error + # shellcheck disable=SC2072 + if [[ $(psql --dbname="${dbname}" --no-align --tuples-only --command "SELECT extversion FROM pg_catalog.pg_extension WHERE extname = 'pxf'") > 2.0 ]]; then + psql --dbname="${dbname}" --set ON_ERROR_STOP=on &>>"${log_file}" <<-END_OF_SQL + CREATE OR REPLACE FUNCTION pg_catalog.pxfdelimited_import() RETURNS record + AS 'pxf', 'pxfdelimited_import' + LANGUAGE C STABLE; + END_OF_SQL + fi done echo "Success" | tee -a "${log_file}" From 0fb8d369da52b9c8d4cc7af4806d928796986cdc Mon Sep 17 00:00:00 2001 From: Ashuka Xue Date: Fri, 5 May 2023 10:48:13 -0700 Subject: [PATCH 05/35] Quick fix to run correct test suite after extension upgrade (#971) --- concourse/pipelines/templates/build_pipeline-tpl.yml | 2 +- concourse/tasks/upgrade_extension.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/concourse/pipelines/templates/build_pipeline-tpl.yml b/concourse/pipelines/templates/build_pipeline-tpl.yml index 7b5706474e..baaaa171f8 100644 --- a/concourse/pipelines/templates/build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/build_pipeline-tpl.yml @@ -1014,7 +1014,7 @@ jobs: ACCESS_KEY_ID: ((tf-machine-access-key-id)) GP_VER: [[gp_ver]] GROUP: pxfExtensionVersion2 - SECOND_GROUP: upgradePxfExtension + SECOND_GROUP: pxfExtensionVersion2_1 SECRET_ACCESS_KEY: ((tf-machine-secret-access-key)) ## ---------- FILE tests ----------------- diff --git a/concourse/tasks/upgrade_extension.yml b/concourse/tasks/upgrade_extension.yml index 89799feb3d..97ede79fe8 100644 --- a/concourse/tasks/upgrade_extension.yml +++ b/concourse/tasks/upgrade_extension.yml @@ -16,7 
+16,7 @@ inputs: params: GP_VER: GROUP: pxfExtensionVersion2 - SECOND_GROUP: pxfCxtensionVersion2_1 + SECOND_GROUP: pxfExtensionVersion2_1 HADOOP_CLIENT: HDP IMPERSONATION: true ADJUST_AUTOMATION: false From af9c1454be8d4229b39ffdf51dc93f3c8054bc83 Mon Sep 17 00:00:00 2001 From: "Bradford D. Boyle" Date: Thu, 18 May 2023 09:16:21 -0700 Subject: [PATCH 06/35] Remove unnecessary container images (#970) Ubuntu 18.04 will not be a supported platform for GP7 and GP RelEng has deleted the upstream image gpdb7-ubuntu18.04-test. This commit removes the PXF dev base container build for GP7 Ubuntu 18.04. It also updates the EL8 container image for GP6 to use Rocky 8, same as the container image for GP7. Authored-By: Bradford D. Boyle --- concourse/docker/README.md | 269 +++++- concourse/docker/diagram/Makefile | 6 - concourse/docker/diagram/README.md | 7 - concourse/docker/diagram/images.dot | 242 ----- concourse/docker/diagram/images.svg | 900 ------------------ concourse/docker/pxf-dev-base/README.md | 53 +- concourse/docker/pxf-dev-base/cloudbuild.yaml | 104 +- .../gpdb6/{rhel8 => rocky8}/Dockerfile | 2 +- concourse/docker/rpmrebuild/cloudbuild.yaml | 21 +- concourse/docker/rpmrebuild/rhel/Dockerfile | 16 - concourse/docker/rpmrebuild/rocky/Dockerfile | 6 + .../pipelines/certification_pipeline.yml | 8 +- concourse/pipelines/cloudbuild_pipeline.yml | 2 +- .../templates/build_pipeline-tpl.yml | 30 +- .../templates/dev_build_pipeline-tpl.yml | 18 +- .../pipelines/templates/perf_pipeline-tpl.yml | 2 +- .../pipelines/templates/pr_pipeline-tpl.yml | 12 +- 17 files changed, 281 insertions(+), 1417 deletions(-) delete mode 100644 concourse/docker/diagram/Makefile delete mode 100644 concourse/docker/diagram/README.md delete mode 100644 concourse/docker/diagram/images.dot delete mode 100644 concourse/docker/diagram/images.svg rename concourse/docker/pxf-dev-base/gpdb6/{rhel8 => rocky8}/Dockerfile (98%) delete mode 100644 concourse/docker/rpmrebuild/rhel/Dockerfile create mode 
100644 concourse/docker/rpmrebuild/rocky/Dockerfile diff --git a/concourse/docker/README.md b/concourse/docker/README.md index b91cb76e01..84893608ad 100644 --- a/concourse/docker/README.md +++ b/concourse/docker/README.md @@ -15,45 +15,13 @@ changes to `pxf-build-base` and is also in charge of tagging the images as ## Available docker images - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 Greenplum 5Greenplum 6Greenplum 7
CentOS7 gpdb5-centos7-test-pxf gpdb6-centos7-test-pxf gpdb7-centos7-test-pxf
OEL7 N/A gpdb6-oel7-test-pxf N/A
Ubuntu 18.04 N/A gpdb6-ubuntu18.04-test-pxf gpdb7-ubuntu18.04-test-pxf
Rocky8 N/A N/A gpdb7-rocky8-test-pxf
MapR on CentOS7 gpdb6-centos7-test-pxf-mapr N/A
- -* Note: GCR_PROJECT_ID is the name of the Google Cloud Project ID +| | Greenplum 5 | Greenplum 6 | Greenplum 7 | +|------------------|--------------------------|-------------------------------|------------------------------| +| CentOS 7 | `gpdb5-centos7-test-pxf` | `gpdb6-centos7-test-pxf` | N/A | +| OEL 7 | N/A | `gpdb6-oel7-test-pxf` | N/A | +| Ubuntu 18.04 | N/A | `gpdb6-ubuntu18.04-test-pxf` | N/A | +| Rocky Linux 8 | N/A | `gpdb6-rocky8-test-pxf` | `gpdb7-rocky8-test-pxf` | +| MapR on CentOS 7 | N/A | `gpdb6-centos7-test-pxf-mapr` | N/A | ## Development docker image @@ -62,3 +30,226 @@ A PXF development docker image can be pulled with the following command: ```shell script docker pull gcr.io/${GCR_PROJECT_ID}/gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2:latest ``` + +## Diagram of Container Image Building + +This [Mermaid](https://mermaid.js.org/intro/) diagram details the docker images that are used and created by PXF pipelines and developers. + +```mermaid +%%{init: {'theme':'neutral'}}%% +flowchart TD + classDef subgraphStyle fill:none,stroke-dasharray:5,5,stroke:black + classDef dockerhubStyle fill:#268bd2,color:white,stroke:none + classDef gcrPublicStyle fill:#2aa198,stroke:none,color:white + classDef dockerfileStyle fill:#fdf6e3,stroke:none + classDef pipelineStyle fill:#d33682,color:white,stroke:none + classDef latestStyle fill:#6c71c4,color:white,stroke:none + classDef plainStyle fill:none,stroke:black + + subgraph dockerhub [Official DockerHub] + centos7[centos:7] + rocky8[rockylinux:8] + class centos7 dockerhubStyle + class rocky8 dockerhubStyle + + end + class dockerhub subgraphStyle + + subgraph gcr_images ["GP RelEng Images (gcr.io/data-gpdb-public-images)"] + gp5_centos7_latest[centos-gpdb-dev:7-gcc6.2-llvm3.7] + gp6_centos7_latest[gpdb6-centos7-test:latest] + gp6_ubuntu18_latest[gpdb6-ubuntu18.04-test:latest] + gp6_oel7_latest[gpdb6-oel7-test:latest] + gp6_rocky8_latest[gpdb6-rocky8-test:latest] + gp7_rocky8_latest[gpdb7-rocky8-test:latest] + + 
class gp5_centos7_latest gcrPublicStyle + class gp6_centos7_latest gcrPublicStyle + class gp6_ubuntu18_latest gcrPublicStyle + class gp6_oel7_latest gcrPublicStyle + class gp6_rocky8_latest gcrPublicStyle + class gp7_rocky8_latest gcrPublicStyle + end + class gcr_images subgraphStyle + + subgraph pxf_dev_base [pxf-dev-base/cloudbuild.yaml] + gp5_centos7_dockerfile[gpdb5/centos7] + gp6_centos7_dockerfile[gpdb6/centos7] + gp6_rocky8_dockerfile[gpdb6/rocky8] + gp6_ubuntu18_dockerfile[gpdb6/ubuntu18.04] + gp6_oel7_dockerfile[gpdb6/oel7] + gp7_rocky8_dockerfile[gpdb7/rocky8] + + class gp5_centos7_dockerfile dockerfileStyle + class gp6_centos7_dockerfile dockerfileStyle + class gp6_rocky8_dockerfile dockerfileStyle + class gp6_ubuntu18_dockerfile dockerfileStyle + class gp6_oel7_dockerfile dockerfileStyle + class gp7_rocky8_dockerfile dockerfileStyle + end + class pxf_dev_base subgraphStyle + + subgraph rpmrebuild [rpmrebuild/cloudbuild.yaml] + rpm_docker_centos7[centos/Dockerfile] + rpm_docker_rocky8[rocky/Dockerfile] + + class rpm_docker_centos7 dockerfileStyle + class rpm_docker_rocky8 dockerfileStyle + end + class rpmrebuild subgraphStyle + + subgraph gcr_data_gpdb_ud [gcr.io/data-gpdb-ud] + subgraph gpdb_pxf_dev [gpdb-pxf-dev] + gp5_centos7_pxf_sha[gpdb5-centos7-test-pxf:$COMMIT_SHA] + gp6_centos7_pxf_sha[gpdb6-centos7-test-pxf:$COMMIT_SHA] + gp6_rocky8_pxf_sha[gpdb6-rocky8-test-pxf:$COMMIT_SHA] + gp6_ubuntu18_pxf_sha[gpdb6-ubuntu18.04-test-pxf:$COMMIT_SHA] + gp6_oel7_pxf_sha[gpdb6-oel7-test-pxf:$COMMIT_SHA] + gp7_rocky8_pxf_sha[gpdb7-rocky8-test-pxf:$COMMIT_SHA] + + class gp5_centos7_pxf_sha plainStyle + class gp6_centos7_pxf_sha plainStyle + class gp6_rocky8_pxf_sha plainStyle + class gp6_ubuntu18_pxf_sha plainStyle + class gp6_oel7_pxf_sha plainStyle + class gp7_rocky8_pxf_sha plainStyle + + gp5_centos7_pxf_latest[gpdb5-centos7-test-pxf:latest] + gp6_centos7_pxf_latest[gpdb6-centos7-test-pxf:latest] + gp6_rocky8_pxf_latest[gpdb6-rocky8-test-pxf:latest] + 
gp6_ubuntu18_pxf_latest[gpdb6-ubuntu18.04-test-pxf:latest] + gp6_oel7_pxf_latest[gpdb6-oel7-test-pxf:latest] + gp7_rocky8_pxf_latest[gpdb7-rocky8-test-pxf:latest] + + class gp5_centos7_pxf_latest latestStyle + class gp6_centos7_pxf_latest latestStyle + class gp6_rocky8_pxf_latest latestStyle + class gp6_ubuntu18_pxf_latest latestStyle + class gp6_oel7_pxf_latest latestStyle + class gp7_rocky8_pxf_latest latestStyle + end + class gpdb_pxf_dev subgraphStyle + + rpm_centos7_latest[rpmrebuild-centos7:latest] + rpm_rocky8_latest[rpmrebuild-rocky8:latest] + + class rpm_centos7_latest latestStyle + class rpm_rocky8_latest latestStyle + end + class gcr_data_gpdb_ud subgraphStyle + + subgraph local_use_only [For local development use] + subgraph pxf_dev_server [pxf-dev-server/cloudbuild.yaml] + server_dockerfile[Dockerfile] + class server_dockerfile dockerfileStyle + end + class pxf_dev_server subgraphStyle + + subgraph mapr [mapr/cloudbuild.yaml] + mapr_dockerfile[Dockerfile] + class mapr_dockerfile dockerfileStyle + end + class mapr subgraphStyle + + subgraph gcr_data_gpdb_ud_mapr ["MapR Images (gcr.io/data-gpdb-ud)"] + gp6_centos7_pxf_mapr_sha[gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:$COMMIT_SHA] + gp6_centos7_pxf_mapr_latest[gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:latest] + + class gp6_centos7_pxf_mapr_sha plainStyle + class gp6_centos7_pxf_mapr_latest latestStyle + end + class gcr_data_gpdb_ud_mapr subgraphStyle + + subgraph gcr_data_gpdb_ud_hdp2 ["HDP2 (gcr.io/data-gpdb-ud)"] + gp6_centos7_pxf_hdp2_sha[gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2:$COMMIT_SHA] + gp6_centos7_pxf_hdp2_latest[gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2] + + class gp6_centos7_pxf_hdp2_sha plainStyle + style gp6_centos7_pxf_hdp2_latest fill:#b58900,color:white,stroke:none + end + class gcr_data_gpdb_ud_hdp2 subgraphStyle + end + class local_use_only subgraphStyle + + subgraph pipelines [Pipelines] + certification + perf + longevity + build[pxf-build] + pr[pxf_pr_pipeline] + + class certification 
pipelineStyle + class perf pipelineStyle + class longevity pipelineStyle + class build pipelineStyle + class pr pipelineStyle + end + class pipelines subgraphStyle + + gp5_centos7_latest --> gp5_centos7_dockerfile + gp5_centos7_dockerfile -- CloudBuild --> gp5_centos7_pxf_sha + gp5_centos7_pxf_sha -- "tag (concourse pipeline)" --> gp5_centos7_pxf_latest + + gp6_centos7_latest --> gp6_centos7_dockerfile + gp6_centos7_dockerfile -- CloudBuild --> gp6_centos7_pxf_sha + gp6_centos7_pxf_sha -- "tag (concourse pipeline)" --> gp6_centos7_pxf_latest + + gp6_rocky8_latest --> gp6_rocky8_dockerfile + gp6_rocky8_dockerfile -- CloudBuild --> gp6_rocky8_pxf_sha + gp6_rocky8_pxf_sha -- "tag (concourse pipeline)" --> gp6_rocky8_pxf_latest + + gp6_ubuntu18_latest --> gp6_ubuntu18_dockerfile + gp6_ubuntu18_dockerfile -- CloudBuild --> gp6_ubuntu18_pxf_sha + gp6_ubuntu18_pxf_sha -- "tag (concourse pipeline)" --> gp6_ubuntu18_pxf_latest + + gp6_oel7_latest --> gp6_oel7_dockerfile + gp6_oel7_dockerfile -- CloudBuild --> gp6_oel7_pxf_sha + gp6_oel7_pxf_sha -- "tag (concourse pipeline)" --> gp6_oel7_pxf_latest + + gp7_rocky8_latest --> gp7_rocky8_dockerfile + gp7_rocky8_dockerfile -- CloudBuild --> gp7_rocky8_pxf_sha + gp7_rocky8_pxf_sha -- "tag (concourse pipeline)" --> gp7_rocky8_pxf_latest + + centos7 --> rpm_docker_centos7 + rpm_docker_centos7 --> rpm_centos7_latest + rocky8 --> rpm_docker_rocky8 + rpm_docker_rocky8 --> rpm_rocky8_latest + + gp5_centos7_pxf_latest --> mapr_dockerfile + gp6_centos7_pxf_latest --> mapr_dockerfile + mapr_dockerfile -- "CloudBuild (install MapR)" --> gp6_centos7_pxf_mapr_sha + gp6_centos7_pxf_mapr_sha -- "tag (concourse pipeline)" --> gp6_centos7_pxf_mapr_latest + + gp6_centos7_pxf_latest --> server_dockerfile + server_dockerfile -- "CloudBuild (add singlecluster, build deps, & automation deps)" --> gp6_centos7_pxf_hdp2_sha + gp6_centos7_pxf_hdp2_sha --> gp6_centos7_pxf_hdp2_latest + + gp5_centos7_pxf_latest --> certification + gp5_centos7_pxf_latest 
--> build + gp5_centos7_pxf_latest --> pr + + gp6_centos7_pxf_latest --> certification + gp6_centos7_pxf_latest --> longevity + gp6_centos7_pxf_latest --> perf + gp6_centos7_pxf_latest --> build + gp6_centos7_pxf_latest --> pr + + gp6_rocky8_pxf_latest --> certification + gp6_rocky8_pxf_latest --> perf + gp6_rocky8_pxf_latest --> build + gp6_rocky8_pxf_latest --> pr + + gp6_ubuntu18_pxf_latest --> certification + gp6_ubuntu18_pxf_latest --> build + gp6_ubuntu18_pxf_latest --> pr + + gp6_oel7_pxf_latest --> build + gp6_oel7_pxf_latest --> pr + + gp7_rocky8_pxf_latest --> certification + gp7_rocky8_pxf_latest --> build + gp7_rocky8_pxf_latest --> pr + + rpm_centos7_latest --> build + rpm_rocky8_latest --> build +``` diff --git a/concourse/docker/diagram/Makefile b/concourse/docker/diagram/Makefile deleted file mode 100644 index fe1dd23055..0000000000 --- a/concourse/docker/diagram/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ifeq (, $(shell type dot)) -$(error "No dot in $(PATH), install graphviz") -endif - -images.svg: images.dot - dot -Tsvg images.dot -o images.svg diff --git a/concourse/docker/diagram/README.md b/concourse/docker/diagram/README.md deleted file mode 100644 index b76aa50da6..0000000000 --- a/concourse/docker/diagram/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Diagram of Container Image Building - -![container image building](./images.svg) - -This diagram details the docker images that are used and created by PXF pipelines and developers. It was generated using graphviz. - -It can be recreated using the dot file by running `make` within this directory. 
diff --git a/concourse/docker/diagram/images.dot b/concourse/docker/diagram/images.dot deleted file mode 100644 index c804f6cf0a..0000000000 --- a/concourse/docker/diagram/images.dot +++ /dev/null @@ -1,242 +0,0 @@ -digraph pxf_container_image_flow { - subgraph cluster_dockerhub { - label = "Official DockerHub" - style=dashed - node [shape=box3d fillcolor="#268bd2" style=filled fontcolor=white] - - centos7[label="centos:7"] - } - - # gcr.io/data-gpdb-public-images - subgraph cluster_gcr_images { - label="GP RelEng Images (gcr.io/data-gpdb-public-images)" - style=dashed - node [shape=box3d fillcolor="#2aa198" style=filled fontcolor=white] - gp5_centos7_latest[label="centos-gpdb-dev:7-gcc6.2-llvm3.7"] - gp6_centos7_latest[label="gpdb6-centos7-test:latest"] - gp6_ubuntu18_latest[label="gpdb6-ubuntu18.04-test:latest"] - gp6_oel7_latest[label="gpdb6-oel7-test:latest"] - gp7_centos7_latest[label="gpdb7-centos7-test:latest"] - gp7_rocky8_latest[label="gpdb7-rocky8-test:latest"] - gp7_ubuntu18_latest[label="gpdb7-ubuntu18.04-test:latest"] - } - - subgraph cluster_gcr_images_private { - label="GP RelEng Images (gcr.io/data-gpdb-private-images)" - style=dashed - node [shape=box3d fillcolor="#2aa198" style=filled fontcolor=white] - gp6_rhel8_latest[label="gpdb6-rhel8-test:latest"] - gp7_rhel8_latest[label="gpdb7-rhel8-test:latest"] - } - - # PXF Cloudbuild & Dockerfiles - subgraph cluster_pxf_dev_base { - label = "pxf-dev-base/cloudbuild.yaml" - style=dashed - node [shape=note fillcolor="#fdf6e3" style=filled] - - gp5_centos7_dockerfile[label="gpdb5/centos7"] - gp6_centos7_dockerfile[label="gpdb6/centos7"] - gp6_rhel8_dockerfile[label="gpdb6/rhel8"] - gp6_ubuntu18_dockerfile[label="gpdb6/ubuntu18.04"] - gp6_oel7_dockerfile[label="gpdb6/oel7"] - gp7_centos7_dockerfile[label="gpdb7/centos7"] - gp7_rhel8_dockerfile[label="gpdb7/rhel8"] - gp7_rocky8_dockerfile[label="gpdb7/rocky8"] - gp7_ubuntu18_dockerfile[label="gpdb7/ubuntu18.04"] - - } - - subgraph cluster_rpmrebuild { - 
label = "rpmrebuild/cloudbuild.yaml" - style=dashed - node [shape=note fillcolor="#fdf6e3" style=filled] - - rpm_docker_centos7[label="centos/Dockerfile"] - rpm_docker_rhel8[label="rhel/Dockerfile"] - } - - # UD GCR images - subgraph cluster_gcr_data_gpdb_ud { - label = "gcr.io/data-gpdb-ud" - style=dashed - node [shape=box] - - subgraph cluster_gpdb_pxf_dev { - label = "gpdb-pxf-dev" - style=dashed - node [shape=box] - - gp5_centos7_pxf_sha[label="gpdb5-centos7-test-pxf:$COMMIT_SHA"] - gp6_centos7_pxf_sha[label="gpdb6-centos7-test-pxf:$COMMIT_SHA"] - gp6_rhel8_pxf_sha[label="gpdb6-rhel8-test-pxf:$COMMIT_SHA"] - gp6_ubuntu18_pxf_sha[label="gpdb6-ubuntu18.04-test-pxf:$COMMIT_SHA"] - gp6_oel7_pxf_sha[label="gpdb6-oel7-test-pxf:$COMMIT_SHA"] - gp7_centos7_pxf_sha[label="gpdb7-centos7-test-pxf:$COMMIT_SHA"] - gp7_rhel8_pxf_sha[label="gpdb7-rhel8-test-pxf:$COMMIT_SHA"] - gp7_rocky8_pxf_sha[label="gpdb7-rocky8-test-pxf:$COMMIT_SHA"] - gp7_ubuntu18_pxf_sha[label="gpdb7-ubuntu18.04-test-pxf:$COMMIT_SHA"] - - gp5_centos7_pxf_latest[label="gpdb5-centos7-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp6_centos7_pxf_latest[label="gpdb6-centos7-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp6_rhel8_pxf_latest[label="gpdb6-rhel8-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp6_ubuntu18_pxf_latest[label="gpdb6-ubuntu18.04-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp6_oel7_pxf_latest[label="gpdb6-oel7-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp7_centos7_pxf_latest[label="gpdb7-centos7-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp7_rhel8_pxf_latest[label="gpdb7-rhel8-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp7_rocky8_pxf_latest[label="gpdb7-rocky8-test-pxf:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - gp7_ubuntu18_pxf_latest[label="gpdb7-ubuntu18.04-test-pxf:latest" style=filled 
fillcolor="#6c71c4" fontcolor=white] - } - - rpm_centos7_latest[label="rpmrebuild-centos7:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - rpm_rhel8_latest[label="rpmrebuild-rhel8:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - rpm_rocky8_latest[label="rpmrebuild-rocky8:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - } - - subgraph cluster_pipelines { - label = "Pipelines" - style=dashed - node [shape=box style=filled fillcolor="#d33682 " fontcolor=white] - - certification - perf - longevity - build[label="pxf-build"] - pr[label="pxf_pr_pipeline"] - } - - subgraph cluster_local_use_only { - label = "For local development use" - style=dashed - node [shape=box] - - subgraph cluster_pxf_dev_server { - label = "pxf-dev-server/cloudbuild.yaml" - style=dashed - node [shape=note fillcolor="#fdf6e3" style=filled] - - server_dockerfile[label="Dockerfile"] - } - - subgraph cluster_mapr { - label = "mapr/cloudbuild.yaml" - style=dashed - node [shape=note fillcolor="#fdf6e3" style=filled] - - mapr_dockerfile[label="Dockerfile"] - - } - - subgraph cluster_gcr_data_gpdb_ud_mapr { - label = "MapR Images (gcr.io/data-gpdb-ud)" - style=dashed - node [shape=box] - gp6_centos7_pxf_mapr_sha[label="gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:$COMMIT_SHA"] - gp6_centos7_pxf_mapr_latest[label="gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:latest" style=filled fillcolor="#6c71c4" fontcolor=white] - - } - - subgraph cluster_gcr_data_gpdb_ud_hdp2 { - label = "HDP2 (gcr.io/data-gpdb-ud)" - style=dashed - node [shape=box] - gp6_centos7_pxf_hdp2_sha[label="gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2:$COMMIT_SHA"] - gp6_centos7_pxf_hdp2_latest[label="gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2" style=filled fillcolor="#b58900" fontcolor=white] - } - } - gp5_centos7_latest -> gp5_centos7_dockerfile - gp5_centos7_dockerfile -> gp5_centos7_pxf_sha[label="CloudBuild"] - gp5_centos7_pxf_sha -> gp5_centos7_pxf_latest[label="tag (concourse pipeline)"] - - gp6_centos7_latest -> 
gp6_centos7_dockerfile - gp6_centos7_dockerfile -> gp6_centos7_pxf_sha[label="CloudBuild"] - gp6_centos7_pxf_sha -> gp6_centos7_pxf_latest[label="tag (concourse pipeline)"] - - gp6_rhel8_latest -> gp6_rhel8_dockerfile - gp6_rhel8_dockerfile -> gp6_rhel8_pxf_sha[label="CloudBuild"] - gp6_rhel8_pxf_sha -> gp6_rhel8_pxf_latest[label="tag (concourse pipeline)"] - - gp6_ubuntu18_latest -> gp6_ubuntu18_dockerfile - gp6_ubuntu18_dockerfile -> gp6_ubuntu18_pxf_sha[label="CloudBuild"] - gp6_ubuntu18_pxf_sha -> gp6_ubuntu18_pxf_latest[label="tag (concourse pipeline)"] - - gp6_oel7_latest -> gp6_oel7_dockerfile - gp6_oel7_dockerfile -> gp6_oel7_pxf_sha[label="CloudBuild"] - gp6_oel7_pxf_sha -> gp6_oel7_pxf_latest[label="tag (concourse pipeline)"] - - gp7_centos7_latest -> gp7_centos7_dockerfile - gp7_centos7_dockerfile -> gp7_centos7_pxf_sha[label="CloudBuild"] - gp7_centos7_pxf_sha -> gp7_centos7_pxf_latest[label="tag (concourse pipeline)"] - - gp7_rhel8_latest -> gp7_rhel8_dockerfile - gp7_rhel8_dockerfile -> gp7_rhel8_pxf_sha[label="CloudBuild"] - gp7_rhel8_pxf_sha -> gp7_rhel8_pxf_latest[label="tag (concourse pipeline)"] - - gp7_rocky8_latest -> gp7_rocky8_dockerfile - gp7_rocky8_dockerfile -> gp7_rocky8_pxf_sha[label="CloudBuild"] - gp7_rocky8_pxf_sha -> gp7_rocky8_pxf_latest[label="tag (concourse pipeline)"] - - gp7_ubuntu18_latest -> gp7_ubuntu18_dockerfile - gp7_ubuntu18_dockerfile -> gp7_ubuntu18_pxf_sha[label="CloudBuild"] - gp7_ubuntu18_pxf_sha -> gp7_ubuntu18_pxf_latest[label="tag (concourse pipeline)"] - - centos7 -> rpm_docker_centos7 - rpm_docker_centos7 -> rpm_centos7_latest - - gp6_rhel8_latest -> rpm_docker_rhel8 - gp7_rocky8_latest -> rpm_docker_rhel8 - rpm_docker_rhel8 -> rpm_rhel8_latest - rpm_docker_rhel8 -> rpm_rocky8_latest - - gp5_centos7_pxf_latest -> mapr_dockerfile - gp6_centos7_pxf_latest -> mapr_dockerfile - mapr_dockerfile -> gp6_centos7_pxf_mapr_sha[label="CloudBuild (install MapR)"] - gp6_centos7_pxf_mapr_sha -> 
gp6_centos7_pxf_mapr_latest[label="tag (concourse pipeline)"] - - gp6_centos7_pxf_latest -> server_dockerfile - server_dockerfile -> gp6_centos7_pxf_hdp2_sha[label="CloudBuild (add singlecluster, build deps, & automation deps)"] - gp6_centos7_pxf_hdp2_sha -> gp6_centos7_pxf_hdp2_latest - - gp5_centos7_pxf_latest -> certification - gp5_centos7_pxf_latest -> build - gp5_centos7_pxf_latest -> pr - - gp6_centos7_pxf_latest -> certification - gp6_centos7_pxf_latest -> longevity - gp6_centos7_pxf_latest -> perf - gp6_centos7_pxf_latest -> build - gp6_centos7_pxf_latest -> pr - - gp6_rhel8_pxf_latest -> certification - gp6_rhel8_pxf_latest -> perf - gp6_rhel8_pxf_latest -> build - gp6_rhel8_pxf_latest -> pr - - gp6_ubuntu18_pxf_latest -> certification - gp6_ubuntu18_pxf_latest -> build - gp6_ubuntu18_pxf_latest -> pr - - gp6_oel7_pxf_latest -> build - gp6_oel7_pxf_latest -> pr - - gp7_centos7_pxf_latest -> build - gp7_centos7_pxf_latest -> pr - - gp7_rhel8_pxf_latest -> certification - gp7_rhel8_pxf_latest -> build - gp7_rhel8_pxf_latest -> pr - - gp7_rocky8_pxf_latest -> certification - gp7_rocky8_pxf_latest -> build - gp7_rocky8_pxf_latest -> pr - - gp7_ubuntu18_pxf_latest -> build - gp7_ubuntu18_pxf_latest -> pr - - rpm_centos7_latest -> build - rpm_rhel8_latest -> build - rpm_rocky8_latest -> build - - gp6_centos7_pxf_mapr_latest -> build[label="Conditionally added based off mapr variable"] - -} diff --git a/concourse/docker/diagram/images.svg b/concourse/docker/diagram/images.svg deleted file mode 100644 index 02aa07a9d4..0000000000 --- a/concourse/docker/diagram/images.svg +++ /dev/null @@ -1,900 +0,0 @@ - - - - - - -pxf_container_image_flow - - -cluster_dockerhub - -Official DockerHub - - -cluster_gcr_images - -GP RelEng Images (gcr.io/data-gpdb-public-images) - - -cluster_pxf_dev_base - -pxf-dev-base/cloudbuild.yaml - - -cluster_rpmrebuild - -rpmrebuild/cloudbuild.yaml - - -cluster_gcr_data_gpdb_ud - -gcr.io/data-gpdb-ud - - -cluster_gpdb_pxf_dev - -gpdb-pxf-dev - 
- -cluster_pipelines - -Pipelines - - -cluster_local_use_only - -For local development use - - -cluster_pxf_dev_server - -pxf-dev-server/cloudbuild.yaml - - -cluster_mapr - -mapr/cloudbuild.yaml - - -cluster_gcr_data_gpdb_ud_mapr - -MapR Images (gcr.io/data-gpdb-ud) - - -cluster_gcr_data_gpdb_ud_hdp2 - -HDP2 (gcr.io/data-gpdb-ud) - - -cluster_gcr_images_private - -GP RelEng Images (gcr.io/data-gpdb-private-images) - - - -centos7 - - - - -centos:7 - - - -rpm_docker_centos7 - - - -centos/Dockerfile - - - -centos7->rpm_docker_centos7 - - - - - -gp5_centos7_latest - - - - -centos-gpdb-dev:7-gcc6.2-llvm3.7 - - - -gp5_centos7_dockerfile - - - -gpdb5/centos7 - - - -gp5_centos7_latest->gp5_centos7_dockerfile - - - - - -gp6_centos7_latest - - - - -gpdb6-centos7-test:latest - - - -gp6_centos7_dockerfile - - - -gpdb6/centos7 - - - -gp6_centos7_latest->gp6_centos7_dockerfile - - - - - -gp6_ubuntu18_latest - - - - -gpdb6-ubuntu18.04-test:latest - - - -gp6_ubuntu18_dockerfile - - - -gpdb6/ubuntu18.04 - - - -gp6_ubuntu18_latest->gp6_ubuntu18_dockerfile - - - - - -gp6_oel7_latest - - - - -gpdb6-oel7-test:latest - - - -gp6_oel7_dockerfile - - - -gpdb6/oel7 - - - -gp6_oel7_latest->gp6_oel7_dockerfile - - - - - -gp7_centos7_latest - - - - -gpdb7-centos7-test:latest - - - -gp7_centos7_dockerfile - - - -gpdb7/centos7 - - - -gp7_centos7_latest->gp7_centos7_dockerfile - - - - - -gp7_rocky8_latest - - - - -gpdb7-rocky8-test:latest - - - -gp7_rocky8_dockerfile - - - -gpdb7/rocky8 - - - -gp7_rocky8_latest->gp7_rocky8_dockerfile - - - - - -rpm_docker_rhel8 - - - -rhel/Dockerfile - - - -gp7_rocky8_latest->rpm_docker_rhel8 - - - - - -gp7_ubuntu18_latest - - - - -gpdb7-ubuntu18.04-test:latest - - - -gp7_ubuntu18_dockerfile - - - -gpdb7/ubuntu18.04 - - - -gp7_ubuntu18_latest->gp7_ubuntu18_dockerfile - - - - - -gp6_rhel8_latest - - - - -gpdb6-rhel8-test:latest - - - -gp6_rhel8_dockerfile - - - -gpdb6/rhel8 - - - -gp6_rhel8_latest->gp6_rhel8_dockerfile - - - - - -gp6_rhel8_latest->rpm_docker_rhel8 
- - - - - -gp7_rhel8_latest - - - - -gpdb7-rhel8-test:latest - - - -gp7_rhel8_dockerfile - - - -gpdb7/rhel8 - - - -gp7_rhel8_latest->gp7_rhel8_dockerfile - - - - - -gp5_centos7_pxf_sha - -gpdb5-centos7-test-pxf:$COMMIT_SHA - - - -gp5_centos7_dockerfile->gp5_centos7_pxf_sha - - -CloudBuild - - - -gp6_centos7_pxf_sha - -gpdb6-centos7-test-pxf:$COMMIT_SHA - - - -gp6_centos7_dockerfile->gp6_centos7_pxf_sha - - -CloudBuild - - - -gp6_rhel8_pxf_sha - -gpdb6-rhel8-test-pxf:$COMMIT_SHA - - - -gp6_rhel8_dockerfile->gp6_rhel8_pxf_sha - - -CloudBuild - - - -gp6_ubuntu18_pxf_sha - -gpdb6-ubuntu18.04-test-pxf:$COMMIT_SHA - - - -gp6_ubuntu18_dockerfile->gp6_ubuntu18_pxf_sha - - -CloudBuild - - - -gp6_oel7_pxf_sha - -gpdb6-oel7-test-pxf:$COMMIT_SHA - - - -gp6_oel7_dockerfile->gp6_oel7_pxf_sha - - -CloudBuild - - - -gp7_centos7_pxf_sha - -gpdb7-centos7-test-pxf:$COMMIT_SHA - - - -gp7_centos7_dockerfile->gp7_centos7_pxf_sha - - -CloudBuild - - - -gp7_rhel8_pxf_sha - -gpdb7-rhel8-test-pxf:$COMMIT_SHA - - - -gp7_rhel8_dockerfile->gp7_rhel8_pxf_sha - - -CloudBuild - - - -gp7_rocky8_pxf_sha - -gpdb7-rocky8-test-pxf:$COMMIT_SHA - - - -gp7_rocky8_dockerfile->gp7_rocky8_pxf_sha - - -CloudBuild - - - -gp7_ubuntu18_pxf_sha - -gpdb7-ubuntu18.04-test-pxf:$COMMIT_SHA - - - -gp7_ubuntu18_dockerfile->gp7_ubuntu18_pxf_sha - - -CloudBuild - - - -rpm_centos7_latest - -rpmrebuild-centos7:latest - - - -rpm_docker_centos7->rpm_centos7_latest - - - - - -rpm_rhel8_latest - -rpmrebuild-rhel8:latest - - - -rpm_docker_rhel8->rpm_rhel8_latest - - - - - -rpm_rocky8_latest - -rpmrebuild-rocky8:latest - - - -rpm_docker_rhel8->rpm_rocky8_latest - - - - - -gp5_centos7_pxf_latest - -gpdb5-centos7-test-pxf:latest - - - -gp5_centos7_pxf_sha->gp5_centos7_pxf_latest - - -tag (concourse pipeline) - - - -gp6_centos7_pxf_latest - -gpdb6-centos7-test-pxf:latest - - - -gp6_centos7_pxf_sha->gp6_centos7_pxf_latest - - -tag (concourse pipeline) - - - -gp6_rhel8_pxf_latest - -gpdb6-rhel8-test-pxf:latest - - - 
-gp6_rhel8_pxf_sha->gp6_rhel8_pxf_latest - - -tag (concourse pipeline) - - - -gp6_ubuntu18_pxf_latest - -gpdb6-ubuntu18.04-test-pxf:latest - - - -gp6_ubuntu18_pxf_sha->gp6_ubuntu18_pxf_latest - - -tag (concourse pipeline) - - - -gp6_oel7_pxf_latest - -gpdb6-oel7-test-pxf:latest - - - -gp6_oel7_pxf_sha->gp6_oel7_pxf_latest - - -tag (concourse pipeline) - - - -gp7_centos7_pxf_latest - -gpdb7-centos7-test-pxf:latest - - - -gp7_centos7_pxf_sha->gp7_centos7_pxf_latest - - -tag (concourse pipeline) - - - -gp7_rhel8_pxf_latest - -gpdb7-rhel8-test-pxf:latest - - - -gp7_rhel8_pxf_sha->gp7_rhel8_pxf_latest - - -tag (concourse pipeline) - - - -gp7_rocky8_pxf_latest - -gpdb7-rocky8-test-pxf:latest - - - -gp7_rocky8_pxf_sha->gp7_rocky8_pxf_latest - - -tag (concourse pipeline) - - - -gp7_ubuntu18_pxf_latest - -gpdb7-ubuntu18.04-test-pxf:latest - - - -gp7_ubuntu18_pxf_sha->gp7_ubuntu18_pxf_latest - - -tag (concourse pipeline) - - - -certification - -certification - - - -gp5_centos7_pxf_latest->certification - - - - - -build - -pxf-build - - - -gp5_centos7_pxf_latest->build - - - - - -pr - -pxf_pr_pipeline - - - -gp5_centos7_pxf_latest->pr - - - - - -mapr_dockerfile - - - -Dockerfile - - - -gp5_centos7_pxf_latest->mapr_dockerfile - - - - - -gp6_centos7_pxf_latest->certification - - - - - -perf - -perf - - - -gp6_centos7_pxf_latest->perf - - - - - -longevity - -longevity - - - -gp6_centos7_pxf_latest->longevity - - - - - -gp6_centos7_pxf_latest->build - - - - - -gp6_centos7_pxf_latest->pr - - - - - -server_dockerfile - - - -Dockerfile - - - -gp6_centos7_pxf_latest->server_dockerfile - - - - - -gp6_centos7_pxf_latest->mapr_dockerfile - - - - - -gp6_rhel8_pxf_latest->certification - - - - - -gp6_rhel8_pxf_latest->perf - - - - - -gp6_rhel8_pxf_latest->build - - - - - -gp6_rhel8_pxf_latest->pr - - - - - -gp6_ubuntu18_pxf_latest->certification - - - - - -gp6_ubuntu18_pxf_latest->build - - - - - -gp6_ubuntu18_pxf_latest->pr - - - - - -gp6_oel7_pxf_latest->build - - - - - 
-gp6_oel7_pxf_latest->pr - - - - - -gp7_centos7_pxf_latest->build - - - - - -gp7_centos7_pxf_latest->pr - - - - - -gp7_rhel8_pxf_latest->certification - - - - - -gp7_rhel8_pxf_latest->build - - - - - -gp7_rhel8_pxf_latest->pr - - - - - -gp7_rocky8_pxf_latest->certification - - - - - -gp7_rocky8_pxf_latest->build - - - - - -gp7_rocky8_pxf_latest->pr - - - - - -gp7_ubuntu18_pxf_latest->build - - - - - -gp7_ubuntu18_pxf_latest->pr - - - - - -rpm_centos7_latest->build - - - - - -rpm_rhel8_latest->build - - - - - -rpm_rocky8_latest->build - - - - - -gp6_centos7_pxf_hdp2_sha - -gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2:$COMMIT_SHA - - - -server_dockerfile->gp6_centos7_pxf_hdp2_sha - - -CloudBuild (add singlecluster, build deps, & automation deps) - - - -gp6_centos7_pxf_mapr_sha - -gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:$COMMIT_SHA - - - -mapr_dockerfile->gp6_centos7_pxf_mapr_sha - - -CloudBuild (install MapR) - - - -gp6_centos7_pxf_mapr_latest - -gpdb-pxf-dev/gpdb6-centos7-test-pxf-mapr:latest - - - -gp6_centos7_pxf_mapr_sha->gp6_centos7_pxf_mapr_latest - - -tag (concourse pipeline) - - - -gp6_centos7_pxf_mapr_latest->build - - -Conditionally added based off mapr variable - - - -gp6_centos7_pxf_hdp2_latest - -gpdb-pxf-dev/gpdb6-centos7-test-pxf-hdp2 - - - -gp6_centos7_pxf_hdp2_sha->gp6_centos7_pxf_hdp2_latest - - - - - diff --git a/concourse/docker/pxf-dev-base/README.md b/concourse/docker/pxf-dev-base/README.md index c1a49d6112..bdc5043dff 100644 --- a/concourse/docker/pxf-dev-base/README.md +++ b/concourse/docker/pxf-dev-base/README.md @@ -65,16 +65,16 @@ command to build the image: . popd -### Docker gpdb6-rhel8-test-pxf-image image +### Docker gpdb6-rocky8-test-pxf-image image Build this image for Greenplum 6 running on Rhel 8. 
Run the following command to build the image: pushd ~/workspace/pxf/concourse/docker/pxf-dev-base/ docker build \ - --build-arg=BASE_IMAGE=gcr.io/data-gpdb-private-images/gpdb6-rhel8-test:latest \ - --tag=gpdb6-rhel8-test-pxf \ - -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb6/rhel8/Dockerfile \ + --build-arg=BASE_IMAGE=gcr.io/data-gpdb-public-images/gpdb6-rocky8-test:latest \ + --tag=gpdb6-rocky8-test-pxf \ + -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb6/rocky8/Dockerfile \ . popd @@ -108,36 +108,7 @@ following command to build the image: ## Greenplum 7 Images -### Docker gpdb7-centos7-test-pxf-image image - -TODO: ===> remove this if Greenplum 7 will not be supported on Centos7 <=== - -Build this image for Greenplum 7 running on CentOS 7. Run the following -command to build the image: - - pushd ~/workspace/pxf/concourse/docker/pxf-dev-base/ - docker build \ - --build-arg=BASE_IMAGE=gcr.io/data-gpdb-public-images/gpdb7-centos7-test:latest \ - --build-arg=GO_VERSION=${GO_VERSION} \ - --tag=gpdb7-centos7-test-pxf \ - -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb7/centos7/Dockerfile \ - . - popd - -### Docker gpdb7-rhel8-test-pxf-image image - -Build this image for Greenplum 7 running on Rhel 8. Run the following -command to build the image: - - pushd ~/workspace/pxf/concourse/docker/pxf-dev-base/ - docker build \ - --build-arg=BASE_IMAGE=gcr.io/data-gpdb-private-images/gpdb7-rhel8-test:latest \ - --tag=gpdb7-rhel8-test-pxf \ - -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb7/rhel8/Dockerfile \ - . - popd - -### Docker gpdb7-rhel8-test-pxf-image image +### Docker gpdb7-rocky8-test-pxf-image image Build this image for Greenplum 7 running on Rocky 8. Run the following command to build the image: @@ -149,17 +120,3 @@ command to build the image: -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb7/rocky8/Dockerfile \ . popd - -### Docker gpdb7-ubuntu18.04-test-pxf-image image - -Build this image for Greenplum 7 running on Ubuntu 18.04. 
Run the following -command to build the image: - - pushd ~/workspace/pxf/concourse/docker/pxf-dev-base/ - docker build \ - --build-arg=BASE_IMAGE=gcr.io/data-gpdb-public-images/gpdb7-ubuntu18.04-test:latest \ - --build-arg=GO_VERSION=${GO_VERSION} \ - --tag=gpdb7-ubuntu18.04-test-pxf \ - -f ~/workspace/pxf/concourse/docker/pxf-dev-base/gpdb7/ubuntu18.04/Dockerfile \ - . - popd diff --git a/concourse/docker/pxf-dev-base/cloudbuild.yaml b/concourse/docker/pxf-dev-base/cloudbuild.yaml index d817965fdd..7764816558 100644 --- a/concourse/docker/pxf-dev-base/cloudbuild.yaml +++ b/concourse/docker/pxf-dev-base/cloudbuild.yaml @@ -44,7 +44,6 @@ steps: # GPDB 6 Images ############################################################################## -# Corresponds to the docker-gpdb-pxf-dev-centos7 job in the docker pipeline - name: 'gcr.io/cloud-builders/docker' id: gpdb6-centos7-test-pxf-image-cache entrypoint: 'bash' @@ -70,33 +69,31 @@ steps: waitFor: - gpdb6-centos7-test-pxf-image-cache -# Corresponds to the docker-gpdb-pxf-dev-rhel8 job in the docker pipeline - name: 'gcr.io/cloud-builders/docker' - id: gpdb6-rhel8-test-pxf-image-cache + id: gpdb6-rocky8-test-pxf-image-cache entrypoint: 'bash' args: - '-c' - | mkdir -p /workspace/build - docker pull gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rhel8-test-pxf:latest || exit 0 + docker pull gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rocky8-test-pxf:latest || exit 0 waitFor: ['-'] - name: 'gcr.io/cloud-builders/docker' - id: gpdb6-rhel8-test-pxf-image + id: gpdb6-rocky8-test-pxf-image args: - 'build' - - '--build-arg=BASE_IMAGE=${_PRIVATE_BASE_IMAGE_REPOSITORY}/gpdb6-rhel8-test:latest' + - '--build-arg=BASE_IMAGE=${_BASE_IMAGE_REPOSITORY}/gpdb6-rocky8-test:latest' - '--build-arg=GO_VERSION=${_GO_VERSION}' - - '--tag=gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rhel8-test-pxf:$COMMIT_SHA' + - '--tag=gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rocky8-test-pxf:$COMMIT_SHA' - '--cache-from' - - 
'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rhel8-test-pxf:latest' + - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rocky8-test-pxf:latest' - '-f' - - 'concourse/docker/pxf-dev-base/gpdb6/rhel8/Dockerfile' + - 'concourse/docker/pxf-dev-base/gpdb6/rocky8/Dockerfile' - '/workspace/build/' waitFor: - - gpdb6-rhel8-test-pxf-image-cache + - gpdb6-rocky8-test-pxf-image-cache -# Corresponds to the docker-gpdb-pxf-dev-ubuntu18 job in the docker pipeline - name: 'gcr.io/cloud-builders/docker' id: gpdb6-ubuntu18.04-test-pxf-image-cache entrypoint: 'bash' @@ -152,59 +149,7 @@ steps: # GPDB 7 Images ############################################################################## -# Greenplum 7 Centos 7 Image -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-centos7-test-pxf-image-cache - entrypoint: 'bash' - args: - - '-c' - - | - mkdir -p /workspace/build - docker pull gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-centos7-test-pxf:latest || exit 0 - waitFor: ['-'] - -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-centos7-test-pxf-image - args: - - 'build' - - '--build-arg=BASE_IMAGE=${_BASE_IMAGE_REPOSITORY}/gpdb7-centos7-test:latest' - - '--build-arg=GO_VERSION=${_GO_VERSION}' - - '--tag=gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-centos7-test-pxf:$COMMIT_SHA' - - '--cache-from' - - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-centos7-test-pxf:latest' - - '-f' - - 'concourse/docker/pxf-dev-base/gpdb7/centos7/Dockerfile' - - '/workspace/build/' - waitFor: - - gpdb7-centos7-test-pxf-image-cache - -# Greenplum 7 Rhel 8 Image -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-rhel8-test-pxf-image-cache - entrypoint: 'bash' - args: - - '-c' - - | - mkdir -p /workspace/build - docker pull gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-rhel8-test-pxf:latest || exit 0 - waitFor: ['-'] - -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-rhel8-test-pxf-image - args: - - 'build' - - '--build-arg=BASE_IMAGE=${_PRIVATE_BASE_IMAGE_REPOSITORY}/gpdb7-rhel8-test:latest' - - '--build-arg=GO_VERSION=${_GO_VERSION}' - - 
'--tag=gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-rhel8-test-pxf:$COMMIT_SHA' - - '--cache-from' - - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-rhel8-test-pxf:latest' - - '-f' - - 'concourse/docker/pxf-dev-base/gpdb7/rhel8/Dockerfile' - - '/workspace/build/' - waitFor: - - gpdb7-rhel8-test-pxf-image-cache - - # Greenplum 7 Rocky 8 Image +# Greenplum 7 Rocky 8 Image - name: 'gcr.io/cloud-builders/docker' id: gpdb7-rocky8-test-pxf-image-cache entrypoint: 'bash' @@ -230,32 +175,6 @@ steps: waitFor: - gpdb7-rocky8-test-pxf-image-cache -# Greenplum 7 Ubuntu 18.04 Image -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-ubuntu18.04-test-pxf-image-cache - entrypoint: 'bash' - args: - - '-c' - - | - mkdir -p /workspace/build - docker pull gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-ubuntu18.04-test-pxf:latest || exit 0 - waitFor: ['-'] - -- name: 'gcr.io/cloud-builders/docker' - id: gpdb7-ubuntu18.04-test-pxf-image - args: - - 'build' - - '--build-arg=BASE_IMAGE=${_BASE_IMAGE_REPOSITORY}/gpdb7-ubuntu18.04-test:latest' - - '--build-arg=GO_VERSION=${_GO_VERSION}' - - '--tag=gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-ubuntu18.04-test-pxf:$COMMIT_SHA' - - '--cache-from' - - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-ubuntu18.04-test-pxf:latest' - - '-f' - - 'concourse/docker/pxf-dev-base/gpdb7/ubuntu18.04/Dockerfile' - - '/workspace/build/' - waitFor: - - gpdb7-ubuntu18.04-test-pxf-image-cache - substitutions: _GO_VERSION: '1.19.6' # default values @@ -263,10 +182,7 @@ substitutions: images: - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb5-centos7-test-pxf:$COMMIT_SHA' - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-centos7-test-pxf:$COMMIT_SHA' -- 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rhel8-test-pxf:$COMMIT_SHA' +- 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-rocky8-test-pxf:$COMMIT_SHA' - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-ubuntu18.04-test-pxf:$COMMIT_SHA' - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb6-oel7-test-pxf:$COMMIT_SHA' -- 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-centos7-test-pxf:$COMMIT_SHA' -- 
'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-rhel8-test-pxf:$COMMIT_SHA' - 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-rocky8-test-pxf:$COMMIT_SHA' -- 'gcr.io/$PROJECT_ID/gpdb-pxf-dev/gpdb7-ubuntu18.04-test-pxf:$COMMIT_SHA' diff --git a/concourse/docker/pxf-dev-base/gpdb6/rhel8/Dockerfile b/concourse/docker/pxf-dev-base/gpdb6/rocky8/Dockerfile similarity index 98% rename from concourse/docker/pxf-dev-base/gpdb6/rhel8/Dockerfile rename to concourse/docker/pxf-dev-base/gpdb6/rocky8/Dockerfile index 9467e788e9..12b9cb539b 100644 --- a/concourse/docker/pxf-dev-base/gpdb6/rhel8/Dockerfile +++ b/concourse/docker/pxf-dev-base/gpdb6/rocky8/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=gcr.io/data-gpdb-private-images/gpdb6-rhel8-test:latest +ARG BASE_IMAGE=gcr.io/data-gpdb-public-images/gpdb6-rocky8-test:latest FROM ${BASE_IMAGE} diff --git a/concourse/docker/rpmrebuild/cloudbuild.yaml b/concourse/docker/rpmrebuild/cloudbuild.yaml index 041304443c..23e0f671ef 100644 --- a/concourse/docker/rpmrebuild/cloudbuild.yaml +++ b/concourse/docker/rpmrebuild/cloudbuild.yaml @@ -15,32 +15,19 @@ steps: - 'concourse/docker/rpmrebuild/centos' waitFor: ['-'] -# Builds the rpmrebuild-rhel8 image -- name: 'gcr.io/cloud-builders/docker' - id: rpmrebuild-rhel8-image - args: - - 'build' - - '--build-arg=BASE_IMAGE=${_PRIVATE_BASE_IMAGE_REPOSITORY}/gpdb6-rhel8-test:latest' - - '--tag=gcr.io/$PROJECT_ID/rpmrebuild-rhel8:latest' - - '-f' - - 'concourse/docker/rpmrebuild/rhel/Dockerfile' - - 'concourse/docker/rpmrebuild/rhel' - waitFor: ['-'] - - # Builds the rpmrebuild-rocky8 image +# Builds the rpmrebuild-rocky8 image - name: 'gcr.io/cloud-builders/docker' id: rpmrebuild-rocky8-image args: - 'build' - - '--build-arg=BASE_IMAGE=${_BASE_IMAGE_REPOSITORY}/gpdb7-rocky8-test:latest' + - '--build-arg=BASE_IMAGE=rockylinux:8' - '--tag=gcr.io/$PROJECT_ID/rpmrebuild-rocky8:latest' - '-f' - - 'concourse/docker/rpmrebuild/rhel/Dockerfile' - - 'concourse/docker/rpmrebuild/rhel' + - 
'concourse/docker/rpmrebuild/rocky/Dockerfile' + - 'concourse/docker/rpmrebuild/rocky' waitFor: ['-'] # Push images from Cloud Build to Container Registry images: - 'gcr.io/$PROJECT_ID/rpmrebuild-centos7:latest' - - 'gcr.io/$PROJECT_ID/rpmrebuild-rhel8:latest' - 'gcr.io/$PROJECT_ID/rpmrebuild-rocky8:latest' diff --git a/concourse/docker/rpmrebuild/rhel/Dockerfile b/concourse/docker/rpmrebuild/rhel/Dockerfile deleted file mode 100644 index 54b3392d8b..0000000000 --- a/concourse/docker/rpmrebuild/rhel/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -ARG BASE_IMAGE=gcr.io/data-gpdb-private-images/gpdb6-rhel8-test:latest - -FROM centos:7 as downloader - -# download source RPM for rpmrebuild from EPEL repository for EL7 -RUN yum install -y epel-release && yumdownloader --source rpmrebuild - -FROM ${BASE_IMAGE} - -COPY --from=downloader /rpmrebuild-*.src.rpm / - -# build EL8 RPM for rpmrebuild from the downloaded source RPM -RUN rpmbuild --rebuild rpmrebuild-*.src.rpm \ - && rpm -i /root/rpmbuild/RPMS/noarch/rpmrebuild-*.el8.noarch.rpm \ - && rm rpmrebuild-*.src.rpm \ - && rm -rf /root/rpmbuild diff --git a/concourse/docker/rpmrebuild/rocky/Dockerfile b/concourse/docker/rpmrebuild/rocky/Dockerfile new file mode 100644 index 0000000000..ced3b26c2c --- /dev/null +++ b/concourse/docker/rpmrebuild/rocky/Dockerfile @@ -0,0 +1,6 @@ +ARG BASE_IMAGE=rockylinux:8 + +FROM ${BASE_IMAGE} + +RUN dnf install -y epel-release +RUN dnf install -y rpmrebuild diff --git a/concourse/pipelines/certification_pipeline.yml b/concourse/pipelines/certification_pipeline.yml index 2e3be8ddbb..bb6571a37d 100644 --- a/concourse/pipelines/certification_pipeline.yml +++ b/concourse/pipelines/certification_pipeline.yml @@ -89,11 +89,11 @@ resources: username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) -- name: gpdb6-pxf-dev-rhel8-image +- name: gpdb6-pxf-dev-rocky8-image type: registry-image icon: docker source: - repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb6-rhel8-test-pxf 
+ repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb6-rocky8-test-pxf tag: latest username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) @@ -266,14 +266,14 @@ jobs: - get: pxf_package resource: pxf_gp6_rpm_rhel8 trigger: true - - get: gpdb6-pxf-dev-rhel8-image + - get: gpdb6-pxf-dev-rocky8-image - get: ccp-7-image - get: pxf-automation-dependencies - get: singlecluster resource: singlecluster-hdp2 - task: Test GPDB-6 with PXF-GP6-HDP2 on RHEL8 file: pxf_src/concourse/tasks/test_certification.yml - image: gpdb6-pxf-dev-rhel8-image + image: gpdb6-pxf-dev-rocky8-image params: ACCESS_KEY_ID: ((tf-machine-access-key-id)) GP_VER: 6 diff --git a/concourse/pipelines/cloudbuild_pipeline.yml b/concourse/pipelines/cloudbuild_pipeline.yml index 22748ad5d6..846433bce9 100644 --- a/concourse/pipelines/cloudbuild_pipeline.yml +++ b/concourse/pipelines/cloudbuild_pipeline.yml @@ -109,7 +109,7 @@ jobs: GOOGLE_CREDENTIALS: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) GOOGLE_PROJECT_ID: ((ud/pxf/common/google-project-id)) GOOGLE_ZONE: ((ud/pxf/common/google-zone)) - IMAGE_LIST: "gpdb5-centos7-test-pxf gpdb6-centos7-test-pxf gpdb6-rhel8-test-pxf gpdb6-ubuntu18.04-test-pxf gpdb6-oel7-test-pxf gpdb7-centos7-test-pxf gpdb7-rhel8-test-pxf gpdb7-rocky8-test-pxf gpdb7-ubuntu18.04-test-pxf" + IMAGE_LIST: "gpdb5-centos7-test-pxf gpdb6-centos7-test-pxf gpdb6-rocky8-test-pxf gpdb6-ubuntu18.04-test-pxf gpdb6-oel7-test-pxf gpdb7-rocky8-test-pxf" config: platform: linux inputs: diff --git a/concourse/pipelines/templates/build_pipeline-tpl.yml b/concourse/pipelines/templates/build_pipeline-tpl.yml index baaaa171f8..5a576b4c7b 100644 --- a/concourse/pipelines/templates/build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/build_pipeline-tpl.yml @@ -376,15 +376,11 @@ resources: {% endfor %} {# gp5, gp6 #} {% for gp_ver in range(6, 8) %} -- name: gpdb[[gp_ver]]-pxf-dev-rhel8-image +- name: gpdb[[gp_ver]]-pxf-dev-rocky8-image type: registry-image icon: docker 
source: -{% if gp_ver == 6 %} - repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rhel8-test-pxf -{% else %} repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rocky8-test-pxf -{% endif %} tag: latest username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) @@ -429,15 +425,6 @@ resources: username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) -- name: rpmrebuild-rhel8-image - type: registry-image - icon: docker - source: - repository: gcr.io/data-gpdb-ud/rpmrebuild-rhel8 - tag: latest - username: _json_key - password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) - - name: rpmrebuild-rocky8-image type: registry-image icon: docker @@ -742,10 +729,10 @@ jobs: trigger: true - get: gpdb_package resource: gpdb[[gp_ver]]_rhel8_rpm_latest-0 - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image - get: pxf-build-dependencies - task: Build PXF-GP[[gp_ver]] on RHEL8 - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image file: pxf_src/concourse/tasks/build.yml params: LICENSE: ((ud/pxf/common/rpm-license)) @@ -766,7 +753,7 @@ jobs: - get: gpdb_package resource: gpdb[[gp_ver]]_rhel8_rpm_latest-0 passed: [Build PXF-GP[[gp_ver]] on RHEL8] - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image passed: [Build PXF-GP[[gp_ver]] on RHEL8] - get: pxf-automation-dependencies {% if gp_ver == 7 %} @@ -776,7 +763,7 @@ jobs: resource: singlecluster-hdp2 - task: Test PXF-GP[[gp_ver]]-HDP2 on RHEL8 file: pxf_src/concourse/tasks/test.yml - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image params: ACCESS_KEY_ID: ((tf-machine-access-key-id)) GP_VER: [[gp_ver]] @@ -1691,7 +1678,7 @@ jobs: - get: pxf_tarball passed: [Compatibility Gate for PXF-GP] resource: pxf_gp[[gp_ver]]_tarball_rhel8 - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image - 
get: singlecluster resource: singlecluster-hdp2 {% for i in range(1, num_gpdb6_versions) %} @@ -1704,7 +1691,7 @@ jobs: steps: {% for i in range(1, num_gpdb6_versions) %} - task: Test Against Greenplum Latest - [[i]] RHEL8 - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image config: platform: linux inputs: @@ -1792,7 +1779,6 @@ jobs: trigger: true - get: google-cloud-sdk-slim-image - get: rpmrebuild-centos7-image - - get: rpmrebuild-rhel8-image - get: rpmrebuild-rocky8-image - get: gpdb6-pxf-dev-ubuntu18-image - task: Get PXF-GP5 and PXF-GP6 Artifacts from Releases Directory @@ -1815,7 +1801,7 @@ jobs: {% endfor %} {% set gp_ver = 6 %} - task: Add OSL file to PXF-GP[[gp_ver]] RPM on RHEL8 - image: rpmrebuild-rhel8-image + image: rpmrebuild-rocky8-image file: pxf_src/concourse/tasks/add_osl_rpm.yml params: GP_VER: [[gp_ver]] diff --git a/concourse/pipelines/templates/dev_build_pipeline-tpl.yml b/concourse/pipelines/templates/dev_build_pipeline-tpl.yml index 489dc371b4..21863fb011 100644 --- a/concourse/pipelines/templates/dev_build_pipeline-tpl.yml +++ b/concourse/pipelines/templates/dev_build_pipeline-tpl.yml @@ -193,15 +193,11 @@ resources: {% endfor %} {# gp5, gp6, and gp7 #} {% for gp_ver in range(6, 8) %} -- name: gpdb[[gp_ver]]-pxf-dev-rhel8-image +- name: gpdb[[gp_ver]]-pxf-dev-rocky8-image type: registry-image icon: docker source: -{% if gp_ver == 6 %} - repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rhel8-test-pxf -{% else %} repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rocky8-test-pxf -{% endif %} tag: latest username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) @@ -535,10 +531,10 @@ jobs: trigger: true - get: gpdb_package resource: gpdb[[gp_ver]]_rhel8_rpm_latest-0 - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image - get: pxf-build-dependencies - task: Build PXF-GP[[gp_ver]] on RHEL8 - image: 
gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image file: pxf_src/concourse/tasks/build.yml params: LICENSE: ((ud/pxf/common/rpm-license)) @@ -564,7 +560,7 @@ jobs: - get: gpdb_package resource: gpdb[[gp_ver]]_rhel8_rpm_latest-0 passed: [Build PXF-GP[[gp_ver]] on RHEL8] - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image passed: [Build PXF-GP[[gp_ver]] on RHEL8] - get: pxf-automation-dependencies {% if gp_ver == 7 %} @@ -574,7 +570,7 @@ jobs: resource: singlecluster-hdp2 - task: Test PXF-GP[[gp_ver]]-HDP2 on RHEL8 file: pxf_src/concourse/tasks/test.yml - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image params: ACCESS_KEY_ID: ((tf-machine-access-key-id)) GP_VER: [[gp_ver]] @@ -605,14 +601,14 @@ jobs: {% if gp_ver == 7 %} - get: gp6-python-libs {% endif %} - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image passed: [Build PXF-GP[[gp_ver]] on RHEL8] - get: pxf-automation-dependencies - get: singlecluster resource: singlecluster-hdp2 - task: Test PXF-FDW-GP[[gp_ver]]-HDP2 on RHEL8 file: pxf_src/concourse/tasks/test.yml - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image params: ACCESS_KEY_ID: ((tf-machine-access-key-id)) GP_VER: [[gp_ver]] diff --git a/concourse/pipelines/templates/perf_pipeline-tpl.yml b/concourse/pipelines/templates/perf_pipeline-tpl.yml index e3045ff531..c26ced08de 100644 --- a/concourse/pipelines/templates/perf_pipeline-tpl.yml +++ b/concourse/pipelines/templates/perf_pipeline-tpl.yml @@ -127,7 +127,7 @@ resources: type: registry-image icon: docker source: - repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb6-rhel8-test-pxf + repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb6-rocky8-test-pxf tag: latest username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) diff --git a/concourse/pipelines/templates/pr_pipeline-tpl.yml 
b/concourse/pipelines/templates/pr_pipeline-tpl.yml index d4aff4d13e..2319d2fd36 100644 --- a/concourse/pipelines/templates/pr_pipeline-tpl.yml +++ b/concourse/pipelines/templates/pr_pipeline-tpl.yml @@ -58,15 +58,11 @@ resources: {% endfor %} {# gp5, gp6 #} {% for gp_ver in range(6, 8) %} -- name: gpdb[[gp_ver]]-pxf-dev-rhel8-image +- name: gpdb[[gp_ver]]-pxf-dev-rocky8-image type: registry-image icon: docker source: -{% if gp_ver == 6 %} - repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rhel8-test-pxf -{% else %} repository: gcr.io/data-gpdb-ud/gpdb-pxf-dev/gpdb[[gp_ver]]-rocky8-test-pxf -{% endif %} tag: latest username: _json_key password: ((ud/pxf/secrets/pxf-cloudbuild-service-account-key)) @@ -215,18 +211,18 @@ jobs: trigger: true - get: gpdb_package resource: gpdb[[gp_ver]]_rhel8_rpm_latest-0 - - get: gpdb[[gp_ver]]-pxf-dev-rhel8-image + - get: gpdb[[gp_ver]]-pxf-dev-rocky8-image - get: pxf-build-dependencies - get: singlecluster resource: singlecluster-hdp2 - task: Build PXF-GP[[gp_ver]] on RHEL8 - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image file: pxf_src/concourse/tasks/build.yml params: LICENSE: ((ud/pxf/common/rpm-license)) VENDOR: ((ud/pxf/common/rpm-vendor)) - task: Test Against Greenplum Latest RHEL8 - image: gpdb[[gp_ver]]-pxf-dev-rhel8-image + image: gpdb[[gp_ver]]-pxf-dev-rocky8-image config: platform: linux inputs: From aec089d97d93ffc1fa83b28acddc7d9393465b31 Mon Sep 17 00:00:00 2001 From: "Bradford D. Boyle" Date: Fri, 19 May 2023 10:44:42 -0700 Subject: [PATCH 07/35] Add Make variable to conditionally include check-creds (#975) When setting a Concourse pipeline, the set-pipeline command can take an optional flag named --check-creds that will validate credential variables against the credential manager. Lately this validation can take an extended period of time which can slow down the development cycle when iterating on CI pipelines. 
Rather than remove the flag altogether and risk missing an incorrect credential in a production pipeline, this commit adds a new Make varialble named CHECK_CREDS which defaults to true. If this variable is true, then the --check-creds flag to set-pipeline will be included. Authored-by: Bradford D. Boyle --- concourse/Makefile | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/concourse/Makefile b/concourse/Makefile index 5b4ab8c71c..5fda6dd540 100644 --- a/concourse/Makefile +++ b/concourse/Makefile @@ -18,6 +18,7 @@ NUM_GPDB5_VERSIONS ?= 10 NUM_GPDB6_VERSIONS ?= 9 REDHAT_MAJOR_VERSION ?= 7 FLY_CMD ?= fly +CHECK_CREDS ?= true TEMPLATE_CMD = ./template_tool FLY_OPTION_NON-INTERACTIVE = SLACK ?= true @@ -37,6 +38,11 @@ MINIO ?= false OEL7 ?= false FILE ?= false +SET_PIPELINE := set-pipeline +ifeq ($(CHECK_CREDS), true) +SET_PIPELINE += --check-creds +endif + .PHONY: build certification dev pr cloudbuild longevity build: set-build-pipeline certification: set-certification-pipeline @@ -48,6 +54,7 @@ longevity: set-longevity-pipeline # ============================= BUILD PIPELINE TARGETS ============================= .PHONY: set-build-pipeline +set-build-pipeline: SLACK=true set-build-pipeline: @PIPELINE_FILE=$$(mktemp) && \ $(TEMPLATE_CMD) --template build_pipeline-tpl.yml --vars \ @@ -56,8 +63,7 @@ set-build-pipeline: num_gpdb5_versions=$(NUM_GPDB5_VERSIONS) \ num_gpdb6_versions=$(NUM_GPDB6_VERSIONS) >"$${PIPELINE_FILE}" && \ $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=$(BUILD_PIPELINE_NAME) \ --config "$${PIPELINE_FILE}" \ --load-vars-from=$(HOME)/workspace/pxf/concourse/settings/pxf-multinode-params.yml \ @@ -78,8 +84,7 @@ set-dev-release-pipeline: num_gpdb5_versions=$(NUM_GPDB5_VERSIONS) \ num_gpdb6_versions=$(NUM_GPDB6_VERSIONS) >"$${PIPELINE_FILE}" && \ $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ \ 
--pipeline=$(DEV_BUILD_PIPELINE_NAME) \ --config "$${PIPELINE_FILE}" \ --load-vars-from=$(HOME)/workspace/pxf/concourse/settings/pxf-multinode-params.yml \ @@ -117,8 +122,7 @@ set-dev-build-pipeline: num_gpdb5_versions=1 \ num_gpdb6_versions=1 >"$${PIPELINE_FILE}" && \ $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=$(DEV_BUILD_PIPELINE_NAME) \ --config "$${PIPELINE_FILE}" \ --load-vars-from=$(HOME)/workspace/pxf/concourse/settings/pxf-multinode-params.yml \ @@ -131,8 +135,7 @@ set-dev-build-pipeline: .PHONY: set-hadoop-cluster-cleaner set-hadoop-cluster-cleaner: $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=hadoop-cluster-cleaner \ --config=pipelines/hadoop-cluster-cleaner.yml \ --var=pxf-git-branch=main \ @@ -150,8 +153,7 @@ set-pr-build-pipeline: num_gpdb5_versions=1 \ num_gpdb6_versions=1 >"$${PIPELINE_FILE}" && \ $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=$(PR_BUILD_PIPELINE_NAME) \ --config "$${PIPELINE_FILE}" \ ${FLY_OPTION_NON-INTERACTIVE} || echo "Generated yaml has errors: check $${PIPELINE_FILE}" @@ -165,8 +167,7 @@ set-pr-build-pipeline: .PHONY: set-certification-pipeline set-certification-pipeline: @$(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=$(CERTIFICATION_PIPELINE_NAME) \ --config pipelines/certification_pipeline.yml \ --var=pxf-git-branch=${BRANCH} \ @@ -180,8 +181,7 @@ set-certification-pipeline: .PHONY: set-cloudbuild-pipeline set-cloudbuild-pipeline: $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --config $(HOME)/workspace/pxf/concourse/pipelines/cloudbuild_pipeline.yml \ --var pxf-git-branch=$(BRANCH) \ --pipeline cloudbuild @@ -196,8 +196,7 @@ set-pivnet-pipeline: num_gpdb5_versions=$(NUM_GPDB5_VERSIONS) \ num_gpdb6_versions=$(NUM_GPDB6_VERSIONS) >"$${PIPELINE_FILE}" && \ $(FLY_CMD) 
--target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=${PIVNET_PIPELINE_NAME} \ --config "$${PIPELINE_FILE}" \ --var pxf-git-branch=$(BRANCH) \ @@ -224,8 +223,7 @@ perf: $(TEMPLATE_CMD) --template perf_pipeline-tpl.yml --vars \ redhat_major_version=$(REDHAT_MAJOR_VERSION) >"$${PIPELINE_FILE}" && \ $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --config "$${PIPELINE_FILE}" \ --load-vars-from=$(HOME)/workspace/pxf/concourse/settings/perf-settings-$(SCALE)g.yml \ --var pxf-git-branch=$(BRANCH) \ @@ -258,8 +256,7 @@ query-execution-parquet-1g: .PHONY: set-longevity-pipeline set-longevity-pipeline: $(FLY_CMD) --target=$(CONCOURSE) \ - set-pipeline \ - --check-creds \ + $(SET_PIPELINE) \ --pipeline=dev:longevity_$(YOUR_TAG)_6X_STABLE \ --config pipelines/longevity_pipeline.yml \ --load-vars-from=settings/pxf-multinode-params.yml \ From 6df9abf8af1731cf34343dc2d8ab972392a8aaf3 Mon Sep 17 00:00:00 2001 From: Himanshu Pandey Date: Tue, 30 May 2023 10:52:18 -0700 Subject: [PATCH 08/35] FDW changes for JdbcTest (#972) * FDW changes for JdbcTest --- .../greenplum/pxf/automation/components/gpdb/Gpdb.java | 5 ++++- .../automation/structures/tables/pxf/ForeignTable.java | 2 +- .../automation/structures/tables/utils/TableFactory.java | 2 +- .../greenplum/pxf/automation/features/jdbc/JdbcTest.java | 9 +++++++++ 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java index 5fb1832c17..8f3353ece0 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java @@ -173,8 +173,11 @@ private void createForeignServers(boolean ignoreFail) throws Exception { List servers = Lists.newArrayList( "default_hdfs", "default_hive", + 
"db_hive_jdbc", // Needed for JdbcHiveTest "default_hbase", - "default_jdbc", + "default_jdbc", // Needed for JdbcHiveTest and other JdbcTest which refers to the default server. + "database_jdbc", + "db-session-params_jdbc", "default_file", "default_s3", "default_gs", diff --git a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java index 6b5ff265b2..0422558d51 100644 --- a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java @@ -39,7 +39,7 @@ protected String createServer() { String[] serverParameters = StringUtils.defaultIfBlank(getServer(), "default").split("="); // getServer() might return a string "server=<..>", strip the prefix int index = serverParameters.length > 1 ? 1 : 0; - return String.format(" SERVER %s_%s", serverParameters[index], getProtocol()); + return String.format(" SERVER %s_%s", serverParameters[index].replace("-","_"), getProtocol()); } protected String createOptions() { diff --git a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java index 36a7b9fd77..b5bb46f9fc 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java @@ -497,7 +497,7 @@ private static ExternalTable getPxfJdbcReadableTable(String name, userParameters.add("USER=" + user); } if (server != null) { - userParameters.add("SERVER=" + server); + exTable.setServer("SERVER=" + server); } if (customParameters != null) { userParameters.add(customParameters); diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcTest.java index fe71caec63..67046b264c 100755 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcTest.java @@ -2,6 +2,8 @@ import java.io.File; +import annotations.FailsWithFDW; +import annotations.WorksWithFDW; import org.greenplum.pxf.automation.structures.tables.basic.Table; import org.greenplum.pxf.automation.structures.tables.pxf.ExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; @@ -11,6 +13,7 @@ import org.greenplum.pxf.automation.features.BaseFeature; +@WorksWithFDW public class JdbcTest extends BaseFeature { private static final String POSTGRES_DRIVER_CLASS = "org.postgresql.Driver"; @@ -401,16 +404,22 @@ public void readViewSessionParams() throws Exception { runTincTest("pxf.features.jdbc.session_params.runTest"); } + @FailsWithFDW + // All the Writable Tests are failing with this Error: + // ERROR: PXF server error : class java.io.DataInputStream cannot be cast to class + // [B (java.io.DataInputStream and [B are in module java.base of loader 'bootstrap') @Test(groups = {"features", "gpdb", "security", "jdbc"}) public void jdbcWritableTable() throws Exception { runTincTest("pxf.features.jdbc.writable.runTest"); } + @FailsWithFDW @Test(groups = {"features", "gpdb", "security", "jdbc"}) public void jdbcWritableTableNoBatch() throws Exception { runTincTest("pxf.features.jdbc.writable_nobatch.runTest"); } + @FailsWithFDW @Test(groups = {"features", "gpdb", "security", "jdbc"}) public void jdbcWritableTablePool() throws Exception { runTincTest("pxf.features.jdbc.writable_pool.runTest"); From cc47568963c6ee015c32b9261063ce10717aabad Mon Sep 17 00:00:00 2001 From: Himanshu Pandey Date: Tue, 30 May 2023 13:38:01 -0700 Subject: [PATCH 09/35] FDW: Fix for 
skipping the dropped and correctly counting Projection Index (#967) * Fix for skipping the dropped columns in FDW --- .../structures/tables/utils/TableFactory.java | 24 ++ .../ColumnProjectionTest.java | 91 ++++- .../features/general/AlterTableTest.java | 70 ++-- .../checkColumnProjection_fdw/__init__.py | 0 .../expected/query01.ans | 358 ++++++++++++++++++ .../checkColumnProjection_fdw/runTest.py | 12 + .../checkColumnProjection_fdw/sql/query01.sql | 77 ++++ .../general/alter/csv/expected/query01.ans | 3 + .../general/alter/csv/sql/query01.sql | 3 + .../expected/query01.ans | 3 + .../without_column_projection/sql/query01.sql | 3 + fdw/pxf_bridge.c | 4 +- fdw/pxf_bridge.h | 1 + fdw/pxf_fdw.c | 1 + fdw/pxf_header.c | 116 ++++-- fdw/pxf_header.h | 3 +- 16 files changed, 696 insertions(+), 73 deletions(-) create mode 100755 automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/__init__.py create mode 100755 automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/expected/query01.ans create mode 100755 automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/runTest.py create mode 100755 automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/sql/query01.sql diff --git a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java index b5bb46f9fc..fb9155dbbf 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/utils/TableFactory.java @@ -3,6 +3,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.commons.lang.StringUtils; import org.greenplum.pxf.automation.enums.EnumPartitionType; import org.greenplum.pxf.automation.enums.EnumPxfDefaultProfiles; @@ -655,6 +656,29 @@ public static 
ExternalTable getPxfJdbcWritableTable(String name, String[] fields return getPxfJdbcWritableTable(name, fields, dataSourcePath, null, null, null, customParameter); } + /** + * Prepares PXF Readable External or Foreign Table for custom data format. + * + * @param name name of the external table which will be generated + * @param fields fields of the external table + * @param path for external table path + * @param dataFormat dataFormat for the external table + * @return PXF Readable External or Foreign table + */ + public static ReadableExternalTable getPxfReadableCustomTable(String name, + String[] fields, + String path, + String dataFormat) { + ReadableExternalTable exTable = getReadableExternalOrForeignTable(name, fields, path, "CUSTOM"); + exTable.setFormatter("pxfwritable_import"); + + if (StringUtils.isNotBlank(dataFormat)) { + exTable.setProfile(ProtocolUtils.getProtocol().value() + ":" + dataFormat); + } + + return exTable; + } + // ============ FDW Adapter ============ private static ReadableExternalTable getReadableExternalOrForeignTable (String name, String[] fields, String path, String format) { return FDWUtils.useFDW ? 
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/columnprojection/ColumnProjectionTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/columnprojection/ColumnProjectionTest.java index d8da5f3112..9ad9150b10 100755 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/columnprojection/ColumnProjectionTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/columnprojection/ColumnProjectionTest.java @@ -1,16 +1,17 @@ package org.greenplum.pxf.automation.features.columnprojection; -import annotations.FailsWithFDW; +import annotations.WorksWithFDW; import org.greenplum.pxf.automation.components.cluster.PhdCluster; import org.greenplum.pxf.automation.features.BaseFeature; import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; +import org.greenplum.pxf.automation.utils.system.FDWUtils; import org.testng.annotations.Test; import java.io.File; /** Functional PXF column projection cases */ -@FailsWithFDW +@WorksWithFDW public class ColumnProjectionTest extends BaseFeature { String testPackageLocation = "/org/greenplum/pxf/automation/testplugin/"; @@ -54,7 +55,91 @@ public void checkColumnProjection() throws Exception { // SELECT t0, colprojvalue FROM test_column_projection GROUP BY t0, colprojvalue HAVING AVG(a1) < 5 ORDER BY t0; // SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; if (gpdb.getVersion() >= 7) { - runTincTest("pxf.features.columnprojection.checkColumnProjection_gp7.runTest"); + /* The below query (mentioned in above comment as well) is propagating for FDW but not for external-table, + * so use a different test set for FDW. + * The Call stack is different in case of external-table and FDW. 
+ + SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + value | colprojvalue + -------+----------------- + 50 | t0|colprojvalue + (1 row) + + Following are the explain plans for the external-table and FDW for the same query, + The different explain plans explains that for one it is projecting and for other it's not. + External Table: + + pxfautomation=# \d+ e_test_column_projection + Foreign table "public.e_test_column_projection" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description + --------------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + t0 | text | | | | | extended | | + a1 | integer | | | | | plain | | + b2 | boolean | | | | | plain | | + colprojvalue | text | | | | | extended | | + FDW options: (format 'text', delimiter ',', "null" E'\\N', escape E'\\', format_type 't', location_uris 'pxf://dummy_path?PROFILE=test:text&FRAGMENTER=org.greenplum.pxf.automation.testplugin.ColumnProjectionVerifyFragmenter&ACCESSOR=org.greenplum.pxf.automation.testplugin.ColumnProjectionVerifyAccessor&RESOLVER=org.greenplum.pxf.plugins.hdfs.StringPassResolver', execute_on 'ALL_SEGMENTS', log_errors 'f', encoding '6', is_writable 'false') + + pxfautomation=# explain analyze SELECT b.value, a.colprojvalue FROM e_test_column_projection a JOIN t0_values b ON a.t0 = b.key; + QUERY PLAN + --------------------------------------------------------------------------------------------------------------------------------------------------- + Gather Motion 3:1 (slice1; segments: 3) (cost=2306.08..20789139.42 rows=77900000 width=36) (actual time=78.603..78.606 rows=1 loops=1) + -> Hash Join (cost=2306.08..19750472.75 rows=25966667 width=36) (actual time=52.602..63.398 rows=1 loops=1) + Hash Cond: (a.t0 = (b.key)::text) + Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets. 
+ -> Foreign Scan on e_test_column_projection a (cost=0.00..11000.00 rows=1000000 width=64) (actual time=51.045..51.076 rows=10 loops=1) + -> Hash (cost=1332.33..1332.33 rows=77900 width=12) (actual time=0.039..0.040 rows=1 loops=1) + Buckets: 524288 Batches: 1 Memory Usage: 4097kB + -> Broadcast Motion 3:3 (slice2; segments: 3) (cost=0.00..1332.33 rows=77900 width=12) (actual time=0.013..0.014 rows=1 loops=1) + -> Seq Scan on t0_values b (cost=0.00..293.67 rows=25967 width=12) (actual time=1.760..1.762 rows=1 loops=1) + Optimizer: Postgres query optimizer + Planning Time: 1.151 ms + (slice0) Executor memory: 106K bytes. + (slice1) Executor memory: 4253K bytes avg x 3 workers, 4253K bytes max (seg0). Work_mem: 4097K bytes max. + (slice2) Executor memory: 37K bytes avg x 3 workers, 37K bytes max (seg0). + Memory used: 128000kB + Execution Time: 8581.766 ms + (16 rows) + + FDW: + + pxfautomation=# \d+ test_column_projection + Foreign table "public.test_column_projection" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description + --------------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + t0 | text | | | | | extended | | + a1 | integer | | | | | plain | | + b2 | boolean | | | | | plain | | + colprojvalue | text | | | | | extended | | + Server: default_test + FDW options: (resource 'dummy_path', format 'text', fragmenter 'org.greenplum.pxf.automation.testplugin.ColumnProjectionVerifyFragmenter', accessor 'org.greenplum.pxf.automation.testplugin.ColumnProjectionVerifyAccessor', resolver 'org.greenplum.pxf.plugins.hdfs.StringPassResolver', delimiter ',') + + pxfautomation=# explain analyze SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + QUERY PLAN + ------------------------------------------------------------------------------------------------------------------------------------------------------------ + Gather Motion 3:1 
(slice1; segments: 3) (cost=50077.50..52328.93 rows=77900 width=36) (actual time=117.120..117.133 rows=1 loops=1) + -> Hash Join (cost=50077.50..51290.27 rows=25967 width=36) (actual time=111.568..112.709 rows=1 loops=1) + Hash Cond: ((b.key)::text = a.t0) + Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 10 of 262144 buckets. + -> Seq Scan on t0_values b (cost=0.00..293.67 rows=25967 width=12) (actual time=0.964..0.966 rows=1 loops=1) + -> Hash (cost=50040.00..50040.00 rows=3000 width=64) (actual time=110.975..110.976 rows=10 loops=1) + Buckets: 262144 Batches: 1 Memory Usage: 2049kB + -> Broadcast Motion 3:3 (slice2; segments: 3) (cost=50000.00..50040.00 rows=3000 width=64) (actual time=110.902..110.906 rows=10 loops=1) + -> Foreign Scan on test_column_projection a (cost=50000.00..50000.00 rows=1000 width=64) (actual time=1.312..1.329 rows=10 loops=1) + Optimizer: Postgres query optimizer + Planning Time: 0.592 ms + (slice0) Executor memory: 38K bytes. + (slice1) Executor memory: 2101K bytes avg x 3 workers, 2101K bytes max (seg0). Work_mem: 2049K bytes max. + (slice2) Executor memory: 90K bytes avg x 3 workers, 97K bytes max (seg2). 
+ Memory used: 128000kB + Execution Time: 117.692 ms + (16 rows) + */ + if (FDWUtils.useFDW) { + runTincTest("pxf.features.columnprojection.checkColumnProjection_fdw.runTest"); + } + else { + runTincTest("pxf.features.columnprojection.checkColumnProjection_gp7.runTest"); + } } else { runTincTest("pxf.features.columnprojection.checkColumnProjection.runTest"); } diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/general/AlterTableTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/general/AlterTableTest.java index af2fd8c94a..a7d0d652bf 100644 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/general/AlterTableTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/general/AlterTableTest.java @@ -1,13 +1,14 @@ package org.greenplum.pxf.automation.features.general; +import annotations.FailsWithFDW; +import annotations.WorksWithFDW; import org.greenplum.pxf.automation.features.BaseFeature; import org.greenplum.pxf.automation.structures.tables.basic.Table; -import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable; -import org.greenplum.pxf.automation.structures.tables.pxf.WritableExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; import org.greenplum.pxf.automation.utils.system.ProtocolUtils; import org.testng.annotations.Test; +@WorksWithFDW public class AlterTableTest extends BaseFeature { private static final String AVRO_TYPES_FILE_NAME = "supported_primitive_types"; @@ -89,47 +90,38 @@ public void beforeClass() throws Exception { @Test(groups = {"features", "gpdb", "security"}) public void dropAndAddColumnsPxfWritableImportWithColumnProjectionSupport() throws Exception { - exTable = new ReadableExternalTable(PXF_ALTER_PARQUET_TABLE, - PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet/" + PARQUET_PRIMITIVE_TYPES, "custom"); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); - exTable.setFormatter("pxfwritable_import"); 
- exTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); - - gpdb.createTableAndVerify(exTable); + exTable = TableFactory.getPxfReadableCustomTable(PXF_ALTER_PARQUET_TABLE, + PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet/" + PARQUET_PRIMITIVE_TYPES, "parquet"); + setParamsAndVerifyTable(); runTincTest("pxf.features.general.alter.pxfwritable_import.with_column_projection.runTest"); } + // TODO: Determine the reason why FDW is failing with the below class cast exception: + // + // ERROR: PXF server error : class java.io.DataInputStream cannot be cast to class java.lang.String + // (java.io.DataInputStream and java.lang.String are in module java.base of loader 'bootstrap') + @FailsWithFDW @Test(groups = {"features", "gpdb", "security"}) public void dropColumnsPxfWritableExport() throws Exception { // Create source table - exTable = new ReadableExternalTable(PXF_PARQUET_TABLE_SOURCE, - PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet/" + PARQUET_PRIMITIVE_TYPES, "custom"); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); - exTable.setFormatter("pxfwritable_import"); - exTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); - gpdb.createTableAndVerify(exTable); + exTable = TableFactory.getPxfReadableCustomTable(PXF_PARQUET_TABLE_SOURCE, + PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet/" + PARQUET_PRIMITIVE_TYPES, "parquet"); + setParamsAndVerifyTable(); // Create writable table - exTable = new WritableExternalTable(PXF_ALTER_WRITE_PARQUET_TABLE, - PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet-write/" + PARQUET_WRITE_PRIMITIVES, "custom"); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); + exTable = TableFactory.getPxfWritableTextTable(PXF_ALTER_WRITE_PARQUET_TABLE, + PARQUET_TABLE_COLUMNS, hdfsPath + "/parquet-write/" + PARQUET_WRITE_PRIMITIVES, null); exTable.setFormatter("pxfwritable_export"); exTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); - gpdb.createTableAndVerify(exTable); + exTable.setFormat("CUSTOM"); + 
setParamsAndVerifyTable(); // Create validation table - exTable = new ReadableExternalTable(PXF_ALTER_WRITE_PARQUET_TABLE + "_r", - PARQUET_TABLE_SUBSET_COLUMNS, hdfsPath + "/parquet-write/" + PARQUET_WRITE_PRIMITIVES, "custom"); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); - exTable.setFormatter("pxfwritable_import"); - exTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); - gpdb.createTableAndVerify(exTable); + exTable = TableFactory.getPxfReadableCustomTable(PXF_ALTER_WRITE_PARQUET_TABLE + "_r", + PARQUET_TABLE_SUBSET_COLUMNS, hdfsPath + "/parquet-write/" + PARQUET_WRITE_PRIMITIVES, "parquet"); + setParamsAndVerifyTable(); runTincTest("pxf.features.general.alter.pxfwritable_export.parquet.runTest"); } @@ -137,7 +129,7 @@ public void dropColumnsPxfWritableExport() throws Exception { @Test(groups = {"features", "gpdb", "security"}) public void dropAndAddColumnsPxfWritableImportWithoutColumnProjectionSupport() throws Exception { // default external table with common settings - exTable = new ReadableExternalTable(PXF_ALTER_AVRO_TABLE, new String[]{ + exTable = TableFactory.getPxfReadableCustomTable(PXF_ALTER_AVRO_TABLE, new String[]{ "type_int int", "type_double float8", "type_string text", @@ -145,13 +137,8 @@ public void dropAndAddColumnsPxfWritableImportWithoutColumnProjectionSupport() t "col_does_not_exist text", "type_long bigint", "type_bytes bytea", - "type_boolean bool"}, hdfsPath + "/avro/" + AVRO_TYPES_FILE_NAME + SUFFIX_AVRO, "custom"); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); - exTable.setFormatter("pxfwritable_import"); - exTable.setProfile(ProtocolUtils.getProtocol().value() + ":avro"); - - gpdb.createTableAndVerify(exTable); + "type_boolean bool"}, hdfsPath + "/avro/" + AVRO_TYPES_FILE_NAME + SUFFIX_AVRO, "avro"); + setParamsAndVerifyTable(); // Verify results runTincTest("pxf.features.general.alter.pxfwritable_import.without_column_projection.runTest"); @@ -178,10 +165,15 @@ public void dropAndAddColumsCsv() 
throws Exception { "longNum bigint", "bool boolean" }, hdfsPath + "/csv/" + fileName, ","); + setParamsAndVerifyTable(); + + runTincTest("pxf.features.general.alter.csv.runTest"); + } + + private void setParamsAndVerifyTable() throws Exception + { exTable.setHost(pxfHost); exTable.setPort(pxfPort); gpdb.createTableAndVerify(exTable); - - runTincTest("pxf.features.general.alter.csv.runTest"); } } diff --git a/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/__init__.py b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/__init__.py new file mode 100755 index 0000000000..e69de29bb2 diff --git a/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/expected/query01.ans b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/expected/query01.ans new file mode 100755 index 0000000000..2c6b313141 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/expected/query01.ans @@ -0,0 +1,358 @@ +-- start_ignore +-- end_ignore +-- start_ignore +DROP TABLE IF EXISTS t0_values; +DROP TABLE +CREATE TABLE t0_values(key char(1), value int) DISTRIBUTED BY (key); +CREATE TABLE +INSERT INTO t0_values VALUES('A', 50); +INSERT 0 1 +-- end_ignore +-- @description query01 for PXF Column Projection Support +SET optimizer = off; +SET +SELECT * FROM test_column_projection ORDER BY t0; + t0 | a1 | b2 | colprojvalue +----+----+----+---------------------- + A | 0 | t | No Column Projection + B | 1 | f | No Column Projection + C | 2 | t | No Column Projection + D | 3 | f | No Column Projection + E | 4 | t | No Column Projection + F | 5 | f | No Column Projection + G | 6 | t | No Column Projection + H | 7 | f | No Column Projection + I | 8 | t | No Column Projection + J | 9 | f | No Column Projection +(10 rows) + +SELECT t0, colprojvalue FROM test_column_projection ORDER BY t0; + t0 | colprojvalue +----+----------------- + A | 
t0|colprojvalue + B | t0|colprojvalue + C | t0|colprojvalue + D | t0|colprojvalue + E | t0|colprojvalue + F | t0|colprojvalue + G | t0|colprojvalue + H | t0|colprojvalue + I | t0|colprojvalue + J | t0|colprojvalue +(10 rows) + +SELECT colprojvalue FROM test_column_projection ORDER BY t0; + colprojvalue +----------------- + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue +(10 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE b2 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|b2|colprojvalue + C | t0|b2|colprojvalue + E | t0|b2|colprojvalue + G | t0|b2|colprojvalue + I | t0|b2|colprojvalue +(5 rows) + +SELECT t0, a1, colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + t0 | a1 | colprojvalue +----+----+----------------------- + B | 1 | t0|a1|b2|colprojvalue + D | 3 | t0|a1|b2|colprojvalue +(2 rows) + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + round | colprojvalue +---------+----------------------- + 1.00000 | t0|a1|b2|colprojvalue + 1.73205 | t0|a1|b2|colprojvalue +(2 rows) + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE b2 = false ORDER BY t0; + round | colprojvalue +---------+----------------------- + 1.00000 | t0|a1|b2|colprojvalue + 1.73205 | t0|a1|b2|colprojvalue + 2.23607 | t0|a1|b2|colprojvalue + 2.64575 | t0|a1|b2|colprojvalue + 3.00000 | t0|a1|b2|colprojvalue +(5 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 5 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue +(5 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 <= 5 ORDER BY t0; + t0 | colprojvalue 
+----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue + F | t0|a1|colprojvalue +(6 rows) + +SELECT t0, colprojvalue FROM test_column_projection GROUP BY t0, colprojvalue HAVING AVG(a1) < 5 ORDER BY t0; + t0 | colprojvalue +----+---------------------- + A | No Column Projection + B | No Column Projection + C | No Column Projection + D | No Column Projection + E | No Column Projection +(5 rows) + +SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + value | colprojvalue +-------+----------------- + 50 | t0|colprojvalue +(1 row) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 2 OR a1 >= 8 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + I | t0|a1|colprojvalue + J | t0|a1|colprojvalue +(4 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE sqrt(a1) > 1 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue + F | t0|a1|colprojvalue + G | t0|a1|colprojvalue + H | t0|a1|colprojvalue + I | t0|a1|colprojvalue + J | t0|a1|colprojvalue +(8 rows) + +SELECT t0, colprojvalue, round(sqrt(a1)::numeric, 5) FROM test_column_projection ORDER BY t0; + t0 | colprojvalue | round +----+--------------------+--------- + A | t0|a1|colprojvalue | 0.00000 + B | t0|a1|colprojvalue | 1.00000 + C | t0|a1|colprojvalue | 1.41421 + D | t0|a1|colprojvalue | 1.73205 + E | t0|a1|colprojvalue | 2.00000 + F | t0|a1|colprojvalue | 2.23607 + G | t0|a1|colprojvalue | 2.44949 + H | t0|a1|colprojvalue | 2.64575 + I | t0|a1|colprojvalue | 2.82843 + J | t0|a1|colprojvalue | 3.00000 +(10 rows) + +-- Casting boolean column to int +SELECT t0, colprojvalue, sqrt(b2::int) FROM test_column_projection ORDER BY t0; + t0 | colprojvalue | sqrt +----+--------------------+------ + A | t0|b2|colprojvalue | 1 + 
B | t0|b2|colprojvalue | 0 + C | t0|b2|colprojvalue | 1 + D | t0|b2|colprojvalue | 0 + E | t0|b2|colprojvalue | 1 + F | t0|b2|colprojvalue | 0 + G | t0|b2|colprojvalue | 1 + H | t0|b2|colprojvalue | 0 + I | t0|b2|colprojvalue | 1 + J | t0|b2|colprojvalue | 0 +(10 rows) + +SET optimizer = on; +SET +SELECT * FROM test_column_projection ORDER BY t0; + t0 | a1 | b2 | colprojvalue +----+----+----+---------------------- + A | 0 | t | No Column Projection + B | 1 | f | No Column Projection + C | 2 | t | No Column Projection + D | 3 | f | No Column Projection + E | 4 | t | No Column Projection + F | 5 | f | No Column Projection + G | 6 | t | No Column Projection + H | 7 | f | No Column Projection + I | 8 | t | No Column Projection + J | 9 | f | No Column Projection +(10 rows) + +SELECT t0, colprojvalue FROM test_column_projection ORDER BY t0; + t0 | colprojvalue +----+----------------- + A | t0|colprojvalue + B | t0|colprojvalue + C | t0|colprojvalue + D | t0|colprojvalue + E | t0|colprojvalue + F | t0|colprojvalue + G | t0|colprojvalue + H | t0|colprojvalue + I | t0|colprojvalue + J | t0|colprojvalue +(10 rows) + +SELECT colprojvalue FROM test_column_projection ORDER BY t0; + colprojvalue +----------------- + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue + t0|colprojvalue +(10 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE b2 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|b2|colprojvalue + C | t0|b2|colprojvalue + E | t0|b2|colprojvalue + G | t0|b2|colprojvalue + I | t0|b2|colprojvalue +(5 rows) + +SELECT t0, a1, colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + t0 | a1 | colprojvalue +----+----+----------------------- + B | 1 | t0|a1|b2|colprojvalue + D | 3 | t0|a1|b2|colprojvalue +(2 rows) + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE a1 < 5 
AND b2 = false ORDER BY t0; + round | colprojvalue +---------+----------------------- + 1.00000 | t0|a1|b2|colprojvalue + 1.73205 | t0|a1|b2|colprojvalue +(2 rows) + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE b2 = false ORDER BY t0; + round | colprojvalue +---------+----------------------- + 1.00000 | t0|a1|b2|colprojvalue + 1.73205 | t0|a1|b2|colprojvalue + 2.23607 | t0|a1|b2|colprojvalue + 2.64575 | t0|a1|b2|colprojvalue + 3.00000 | t0|a1|b2|colprojvalue +(5 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 5 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue +(5 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 <= 5 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue + F | t0|a1|colprojvalue +(6 rows) + +SELECT t0, colprojvalue FROM test_column_projection GROUP BY t0, colprojvalue HAVING AVG(a1) < 5 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue +(5 rows) + +SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + value | colprojvalue +-------+----------------- + 50 | t0|colprojvalue +(1 row) + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 2 OR a1 >= 8 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + A | t0|a1|colprojvalue + B | t0|a1|colprojvalue + I | t0|a1|colprojvalue + J | t0|a1|colprojvalue +(4 rows) + +SELECT t0, colprojvalue FROM test_column_projection WHERE sqrt(a1) > 1 ORDER BY t0; + t0 | colprojvalue +----+-------------------- + C | t0|a1|colprojvalue + D | t0|a1|colprojvalue + E | t0|a1|colprojvalue + 
F | t0|a1|colprojvalue + G | t0|a1|colprojvalue + H | t0|a1|colprojvalue + I | t0|a1|colprojvalue + J | t0|a1|colprojvalue +(8 rows) + +SELECT t0, colprojvalue, round(sqrt(a1)::numeric, 5) FROM test_column_projection ORDER BY t0; + t0 | colprojvalue | round +----+--------------------+--------- + A | t0|a1|colprojvalue | 0.00000 + B | t0|a1|colprojvalue | 1.00000 + C | t0|a1|colprojvalue | 1.41421 + D | t0|a1|colprojvalue | 1.73205 + E | t0|a1|colprojvalue | 2.00000 + F | t0|a1|colprojvalue | 2.23607 + G | t0|a1|colprojvalue | 2.44949 + H | t0|a1|colprojvalue | 2.64575 + I | t0|a1|colprojvalue | 2.82843 + J | t0|a1|colprojvalue | 3.00000 +(10 rows) + +-- Casting boolean column to int +SELECT t0, colprojvalue, sqrt(b2::int) FROM test_column_projection ORDER BY t0; + t0 | colprojvalue | sqrt +----+--------------------+------ + A | t0|b2|colprojvalue | 1 + B | t0|b2|colprojvalue | 0 + C | t0|b2|colprojvalue | 1 + D | t0|b2|colprojvalue | 0 + E | t0|b2|colprojvalue | 1 + F | t0|b2|colprojvalue | 0 + G | t0|b2|colprojvalue | 1 + H | t0|b2|colprojvalue | 0 + I | t0|b2|colprojvalue | 1 + J | t0|b2|colprojvalue | 0 +(10 rows) + +-- cleanup +-- start_ignore +DROP TABLE IF EXISTS t0_values; +DROP TABLE +-- end_ignore diff --git a/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/runTest.py b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/runTest.py new file mode 100755 index 0000000000..310814b6ea --- /dev/null +++ b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/runTest.py @@ -0,0 +1,12 @@ +from mpp.models import SQLTestCase +from mpp.models import SQLConcurrencyTestCase + +class PxfColumnProjection(SQLConcurrencyTestCase): + """ + @db_name pxfautomation + @concurrency 1 + @gpdiff True + """ + sql_dir = 'sql' + ans_dir = 'expected' + out_dir = 'output' diff --git a/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/sql/query01.sql 
b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/sql/query01.sql new file mode 100755 index 0000000000..eb833658a6 --- /dev/null +++ b/automation/tincrepo/main/pxf/features/columnprojection/checkColumnProjection_fdw/sql/query01.sql @@ -0,0 +1,77 @@ +-- start_ignore +DROP TABLE IF EXISTS t0_values; +CREATE TABLE t0_values(key char(1), value int) DISTRIBUTED BY (key); +INSERT INTO t0_values VALUES('A', 50); +-- end_ignore +-- @description query01 for PXF Column Projection Support + +SET optimizer = off; + +SELECT * FROM test_column_projection ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection ORDER BY t0; + +SELECT colprojvalue FROM test_column_projection ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE b2 ORDER BY t0; + +SELECT t0, a1, colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE b2 = false ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 5 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 <= 5 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection GROUP BY t0, colprojvalue HAVING AVG(a1) < 5 ORDER BY t0; + +SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 2 OR a1 >= 8 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE sqrt(a1) > 1 ORDER BY t0; + +SELECT t0, colprojvalue, round(sqrt(a1)::numeric, 5) FROM test_column_projection ORDER BY t0; + +-- Casting boolean column to int +SELECT t0, colprojvalue, sqrt(b2::int) FROM test_column_projection ORDER BY t0; + +SET optimizer = on; + +SELECT * FROM test_column_projection ORDER BY t0; + +SELECT t0, 
colprojvalue FROM test_column_projection ORDER BY t0; + +SELECT colprojvalue FROM test_column_projection ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE b2 ORDER BY t0; + +SELECT t0, a1, colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE a1 < 5 AND b2 = false ORDER BY t0; + +SELECT round(sqrt(a1)::numeric, 5), colprojvalue FROM test_column_projection WHERE b2 = false ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 5 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 <= 5 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection GROUP BY t0, colprojvalue HAVING AVG(a1) < 5 ORDER BY t0; + +SELECT b.value, a.colprojvalue FROM test_column_projection a JOIN t0_values b ON a.t0 = b.key; + +SELECT t0, colprojvalue FROM test_column_projection WHERE a1 < 2 OR a1 >= 8 ORDER BY t0; + +SELECT t0, colprojvalue FROM test_column_projection WHERE sqrt(a1) > 1 ORDER BY t0; + +SELECT t0, colprojvalue, round(sqrt(a1)::numeric, 5) FROM test_column_projection ORDER BY t0; + +-- Casting boolean column to int +SELECT t0, colprojvalue, sqrt(b2::int) FROM test_column_projection ORDER BY t0; + +-- cleanup +-- start_ignore +DROP TABLE IF EXISTS t0_values; +-- end_ignore diff --git a/automation/tincrepo/main/pxf/features/general/alter/csv/expected/query01.ans b/automation/tincrepo/main/pxf/features/general/alter/csv/expected/query01.ans index e0fa1f3e04..387649e390 100755 --- a/automation/tincrepo/main/pxf/features/general/alter/csv/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/general/alter/csv/expected/query01.ans @@ -18,6 +18,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/tmp\/pxf_automation_data\/(.*)/ +-- s/tmp\/pxf_automation_data\/(.*)/pxf:\/\/pxf_automation_data?PROFILE=*:text/g +-- -- end_matchsubs -- This query should error out with invalid input 
syntax for integer diff --git a/automation/tincrepo/main/pxf/features/general/alter/csv/sql/query01.sql b/automation/tincrepo/main/pxf/features/general/alter/csv/sql/query01.sql index 71fa13953f..55b4390644 100755 --- a/automation/tincrepo/main/pxf/features/general/alter/csv/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/general/alter/csv/sql/query01.sql @@ -16,6 +16,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/tmp\/pxf_automation_data\/(.*)/ +-- s/tmp\/pxf_automation_data\/(.*)/pxf:\/\/pxf_automation_data?PROFILE=*:text/g +-- -- end_matchsubs -- This query should error out with invalid input syntax for integer diff --git a/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/expected/query01.ans b/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/expected/query01.ans index 434e1cd97b..270788417e 100755 --- a/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/expected/query01.ans @@ -8,6 +8,9 @@ -- m/DETAIL/ -- s/DETAIL/CONTEXT/ -- +-- m/,.*line (.*)/ +-- s/,.*line (.*)//g +-- -- end_matchsubs -- sets the bytea output to the expected by the tests SET bytea_output='escape'; diff --git a/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/sql/query01.sql b/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/sql/query01.sql index 9faf58a82f..d8230707c8 100755 --- a/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/general/alter/pxfwritable_import/without_column_projection/sql/query01.sql @@ -7,6 +7,9 @@ -- m/DETAIL/ -- s/DETAIL/CONTEXT/ -- +-- m/,.*line (.*)/ +-- s/,.*line (.*)//g +-- -- 
end_matchsubs -- sets the bytea output to the expected by the tests diff --git a/fdw/pxf_bridge.c b/fdw/pxf_bridge.c index f25392897f..b56a2510f3 100644 --- a/fdw/pxf_bridge.c +++ b/fdw/pxf_bridge.c @@ -72,7 +72,8 @@ PxfBridgeImportStart(PxfFdwScanState *pxfsstate) pxfsstate->options, pxfsstate->relation, pxfsstate->filter_str, - pxfsstate->retrieved_attrs); + pxfsstate->retrieved_attrs, + pxfsstate->projectionInfo); pxfsstate->churl_handle = churl_init_download(pxfsstate->uri.data, pxfsstate->churl_headers); @@ -92,6 +93,7 @@ PxfBridgeExportStart(PxfFdwModifyState *pxfmstate) pxfmstate->options, pxfmstate->relation, NULL, + NULL, NULL); pxfmstate->churl_handle = churl_init_upload(pxfmstate->uri.data, pxfmstate->churl_headers); } diff --git a/fdw/pxf_bridge.h b/fdw/pxf_bridge.h index 6693b72be8..b8a866a497 100644 --- a/fdw/pxf_bridge.h +++ b/fdw/pxf_bridge.h @@ -52,6 +52,7 @@ typedef struct PxfFdwScanState List *retrieved_attrs; PxfOptions *options; CopyState cstate; + ProjectionInfo *projectionInfo; } PxfFdwScanState; /* diff --git a/fdw/pxf_fdw.c b/fdw/pxf_fdw.c index 1116f5a549..52dca360c4 100644 --- a/fdw/pxf_fdw.c +++ b/fdw/pxf_fdw.c @@ -422,6 +422,7 @@ pxfBeginForeignScan(ForeignScanState *node, int eflags) pxfsstate->quals = quals; pxfsstate->relation = relation; pxfsstate->retrieved_attrs = retrieved_attrs; + pxfsstate->projectionInfo = node->ss.ps.ps_ProjInfo; /* Set up callback to identify error foreign relation. 
*/ ErrorContextCallback errcallback; diff --git a/fdw/pxf_header.c b/fdw/pxf_header.c index 8f8705c241..b0175d2565 100644 --- a/fdw/pxf_header.c +++ b/fdw/pxf_header.c @@ -39,7 +39,7 @@ static void AddAlignmentSizeHttpHeader(CHURL_HEADERS headers); static void AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel); static void AddOptionsToHttpHeader(CHURL_HEADERS headers, List *options); -static void AddProjectionDescHttpHeader(CHURL_HEADERS headers, List *retrieved_attrs); +static void AddProjectionDescHttpHeader(CHURL_HEADERS headers, List *retrieved_attrs, Relation rel); static void AddProjectionIndexHeader(CHURL_HEADERS headers, int attno, char *long_number); static char *NormalizeKeyName(const char *key); static char *TypeOidGetTypename(Oid typid); @@ -55,7 +55,8 @@ BuildHttpHeaders(CHURL_HEADERS headers, PxfOptions *options, Relation relation, char *filter_string, - List *retrieved_attrs) + List *retrieved_attrs, + ProjectionInfo *projectionInfo) { extvar_t ev; char pxfPortString[sizeof(int32) * 8]; @@ -71,10 +72,19 @@ BuildHttpHeaders(CHURL_HEADERS headers, relnamespace = GetNamespaceName(RelationGetNamespace(relation)); } - if (retrieved_attrs != NULL) + // If projectionInfo is Null, it means there should be no projection. + // We can skip the whole logic of adding projection desc to headers. + // projectionInfo can be NULL in some cases like "SELECT * FROM" + // because we are selecting all the columns here. + if (projectionInfo != NULL) { + //TODO retrieved_attrs contains the list of columns being retrieved + // Ideally this should contains the correct projectionInfo but in some cases it isn't the case. + // For e.g. select * from TABLE_NAME + // Need to figure out if we should be using the projectionInfo or retrieved_attrs here for the projection. 
+ /* add the list of attrs to the projection desc http headers */ - AddProjectionDescHttpHeader(headers, retrieved_attrs); + AddProjectionDescHttpHeader(headers, retrieved_attrs, relation); } /* GP cluster configuration */ @@ -167,11 +177,23 @@ AddAlignmentSizeHttpHeader(CHURL_HEADERS headers) * X-GP-ATTR-TYPENAMEX - attribute X's type name (e.g, "boolean") * optional - X-GP-ATTR-TYPEMODX-COUNT - total number of modifier for attribute X * optional - X-GP-ATTR-TYPEMODX-Y - attribute X's modifiers Y (types which have precision info, like numeric(p,s)) + * + * If a column has been dropped from the foreign table definition, that + * column will not be reported to the PXF server (as if it never existed). + * For example: + * + * --------------------------------------------- + * | col1 | col2 | col3 (dropped) | col4 | + * --------------------------------------------- + * + * Col4 will appear as col3 to the PXF server as if col3 never existed, and + * only 3 columns will be reported to PXF server. 
*/ static void AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) { char long_number[sizeof(int32) * 8]; + int i, attrIx; StringInfoData formatter; TupleDesc tuple; @@ -180,29 +202,29 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) /* Get tuple description itself */ tuple = RelationGetDescr(rel); - /* Convert the number of attributes to a string */ - pg_ltoa(tuple->natts, long_number); - churl_headers_append(headers, "X-GP-ATTRS", long_number); - /* Iterate attributes */ - for (int i = 0; i < tuple->natts; ++i) + for (i = 0, attrIx = 0; i < tuple->natts; ++i) { Form_pg_attribute attr = TupleDescAttr(tuple, i); + // Ignore dropped attributes + if (attr->attisdropped) + continue; + /* Add a key/value pair for attribute name */ resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-NAME%u", i); + appendStringInfo(&formatter, "X-GP-ATTR-NAME%u", attrIx); churl_headers_append(headers, formatter.data, attr->attname.data); /* Add a key/value pair for attribute type */ resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPECODE%u", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPECODE%u", attrIx); pg_ltoa(attr->atttypid, long_number); churl_headers_append(headers, formatter.data, long_number); /* Add a key/value pair for attribute type name */ resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPENAME%u", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPENAME%u", attrIx); churl_headers_append(headers, formatter.data, TypeOidGetTypename(attr->atttypid)); /* Add attribute type modifiers if any */ @@ -213,20 +235,20 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) case NUMERICOID: { resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", attrIx); pg_ltoa(2, long_number); churl_headers_append(headers, formatter.data, long_number); /* precision */ 
resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", i, 0); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", attrIx, 0); pg_ltoa((attr->atttypmod >> 16) & 0xffff, long_number); churl_headers_append(headers, formatter.data, long_number); /* scale */ resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", i, 1); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", attrIx, 1); pg_ltoa((attr->atttypmod - VARHDRSZ) & 0xffff, long_number); churl_headers_append(headers, formatter.data, long_number); break; @@ -236,12 +258,12 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) case VARCHAROID: { resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", attrIx); pg_ltoa(1, long_number); churl_headers_append(headers, formatter.data, long_number); resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", i, 0); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", attrIx, 0); pg_ltoa((attr->atttypmod - VARHDRSZ), long_number); churl_headers_append(headers, formatter.data, long_number); break; @@ -254,12 +276,12 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) case TIMETZOID: { resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", attrIx); pg_ltoa(1, long_number); churl_headers_append(headers, formatter.data, long_number); resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", i, 0); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", attrIx, 0); pg_ltoa((attr->atttypmod), long_number); churl_headers_append(headers, formatter.data, long_number); break; @@ -267,12 +289,12 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) case INTERVALOID: { resetStringInfo(&formatter); - 
appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", i); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-COUNT", attrIx); pg_ltoa(1, long_number); churl_headers_append(headers, formatter.data, long_number); resetStringInfo(&formatter); - appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", i, 0); + appendStringInfo(&formatter, "X-GP-ATTR-TYPEMOD%u-%u", attrIx, 0); pg_ltoa(INTERVAL_PRECISION(attr->atttypmod), long_number); churl_headers_append(headers, formatter.data, long_number); break; @@ -282,8 +304,13 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) break; } } + attrIx++; } + /* Convert the number of attributes to a string */ + pg_ltoa(attrIx, long_number); + churl_headers_append(headers, "X-GP-ATTRS", long_number); + pfree(formatter.data); } @@ -291,25 +318,56 @@ AddTupleDescriptionToHttpHeader(CHURL_HEADERS headers, Relation rel) * Report projection description to the remote component */ static void -AddProjectionDescHttpHeader(CHURL_HEADERS headers, List *retrieved_attrs) +AddProjectionDescHttpHeader(CHURL_HEADERS headers, List *retrieved_attrs, Relation rel) { - ListCell *lc1 = NULL; + ListCell *lc = NULL; char long_number[sizeof(int32) * 8]; + TupleDesc tupdesc = RelationGetDescr(rel); + Bitmapset *attrs_projected = NULL; + int droppedCount = 0; - foreach(lc1, retrieved_attrs) + if (retrieved_attrs == NIL || retrieved_attrs->length == 0) + return; + + foreach(lc, retrieved_attrs) { - int attno = lfirst_int(lc1); + int attno = lfirst_int(lc); - /* zero-based index in the server side */ - AddProjectionIndexHeader(headers, attno - 1, long_number); + attrs_projected = bms_add_member(attrs_projected, + attno - FirstLowInvalidHeapAttributeNumber); } - if (retrieved_attrs->length == 0) - return; + for (int i = 1; i <= tupdesc->natts; i++) + { + /* Dropped attributes count needs for proper indexing of the projected columns. 
+ * For eg: + * --------------------------------------------- + * | col1 | col2 (dropped) | col3 | col4 | + * --------------------------------------------- + * + * We use 0-based indexing and since col2 was dropped, + * the indices for col3 and col4 get shifted by -1. + * Let's assume that col1 and col4 are projected, the reported projected + * indices will then be 0, 2. + */ + if (TupleDescAttr(tupdesc, i-1)->attisdropped) + { + /* keep a counter of the number of dropped attributes */ + droppedCount++; + continue; + } + + if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, attrs_projected)) + { + /* zero-based index in the server side */ + AddProjectionIndexHeader(headers, i - 1 - droppedCount, long_number); + } + } /* Convert the number of projection columns to a string */ pg_ltoa(retrieved_attrs->length, long_number); churl_headers_append(headers, "X-GP-ATTRS-PROJ", long_number); + bms_free(attrs_projected); } /* diff --git a/fdw/pxf_header.h b/fdw/pxf_header.h index eeb1842bfd..7dfcf614d1 100644 --- a/fdw/pxf_header.h +++ b/fdw/pxf_header.h @@ -38,6 +38,7 @@ extern void BuildHttpHeaders(CHURL_HEADERS headers, PxfOptions *options, Relation relation, char *filter_string, - List *retrieved_attrs); + List *retrieved_attrs, + ProjectionInfo *projectionInfo); #endif /* _PXFHEADERS_H_ */ From 27b899d96e111480e4f72d98b0323bbbe4021bb6 Mon Sep 17 00:00:00 2001 From: Alexander Denissov Date: Mon, 5 Jun 2023 10:22:11 -0700 Subject: [PATCH 10/35] Enable write flow for FDW for non-text/csv formats (#973) --- .../components/common/DbSystemObject.java | 37 ++- .../pxf/automation/components/gpdb/Gpdb.java | 143 +++++++- .../pxf/automation/components/hdfs/Hdfs.java | 33 +- .../structures/tables/pxf/ExternalTable.java | 37 ++- .../structures/tables/pxf/ForeignTable.java | 7 +- .../features/avro/HdfsReadableAvroTest.java | 3 - .../features/avro/HdfsWritableAvroTest.java | 260 +++++++-------- .../features/cloud/CloudAccessTest.java | 4 +- 
.../features/parquet/ParquetWriteTest.java | 86 ++--- .../writable/HdfsWritableTextTest.java | 77 +++-- .../automation/smoke/WritableSmokeTest.java | 35 +- .../tinc/main/tinctest/lib/global_init_file | 4 + .../expected/query01.ans | 6 + .../sql/query01.sql | 6 + .../expected/query01.ans | 8 +- .../no_server_no_credentials/sql/query01.sql | 6 +- .../expected/query01.ans | 8 +- .../sql/query01.sql | 6 + .../expected/query01.ans | 5 +- .../sql/query01.sql | 3 + .../expected/query01.ans | 5 +- .../sql/query01.sql | 3 + .../expected/query01.ans | 5 +- .../sql/query01.sql | 3 + .../expected/query01.ans | 5 +- .../sql/query01.sql | 3 + .../expected/query01.ans | 9 +- .../sql/query01.sql | 9 +- .../no_schema_file/expected/query01.ans | 3 + .../errors/no_schema_file/sql/query01.sql | 3 + fdw/pxf_fdw.c | 124 +++++-- server/build.gradle | 1 + .../org/greenplum/pxf/api/io/DataType.java | 20 ++ .../pxf/api/model/InputStreamHandler.java | 14 + .../greenplum/pxf/api/io/DataTypeTest.java | 19 ++ .../pxf/plugins/hdfs/AvroResolver.java | 5 +- .../pxf/plugins/hdfs/StringPassResolver.java | 2 + .../pxf/plugins/hdfs/AvroResolverTest.java | 115 +++++++ server/pxf-service/build.gradle | 2 + .../pxf/service/BridgeInputBuilder.java | 72 ---- .../pxf/service/HttpRequestParser.java | 31 +- .../greenplum/pxf/service/bridge/Bridge.java | 18 +- .../pxf/service/bridge/ReadBridge.java | 3 +- .../service/bridge/SimpleBridgeFactory.java | 9 +- .../pxf/service/bridge/WriteBridge.java | 53 +-- .../service/bridge/WriteVectorizedBridge.java | 9 +- .../service/controller/WriteServiceImpl.java | 2 +- .../pxf/service/serde/BaseRecordReader.java | 39 +++ .../service/serde}/GPDBWritableMapper.java | 4 +- .../serde/GPDBWritableRecordReader.java | 47 +++ .../pxf/service/serde/RecordReader.java | 24 ++ .../service/serde/RecordReaderFactory.java | 61 ++++ .../pxf/service/serde/StreamRecordReader.java | 36 ++ .../pxf/service/serde/TextRecordReader.java | 219 +++++++++++++ 
.../pxf/service/BridgeInputBuilderTest.java | 150 --------- .../pxf/service/GPDataGenerator.java | 308 ++++++++++++++++++ .../bridge/SimpleBridgeFactoryTest.java | 13 +- .../pxf/service/bridge/WriteBridgeTest.java | 25 +- .../serde/RecordReaderFactoryTest.java | 49 +++ .../service/serde/StreamRecordReaderTest.java | 34 ++ .../service/serde/TextRecordReaderTest.java | 83 +++++ .../src/test/resources/data/README.md | 4 + .../src/test/resources/data/sample_data.csv | 27 ++ .../src/test/resources/data/sample_data.sql | 36 ++ .../src/test/resources/data/sample_data.txt | 27 ++ .../test/resources/data/sample_data_pipe.csv | 27 ++ 66 files changed, 1952 insertions(+), 582 deletions(-) create mode 100644 server/pxf-api/src/main/java/org/greenplum/pxf/api/model/InputStreamHandler.java delete mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/BridgeInputBuilder.java create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/BaseRecordReader.java rename server/{pxf-api/src/main/java/org/greenplum/pxf/api => pxf-service/src/main/java/org/greenplum/pxf/service/serde}/GPDBWritableMapper.java (98%) create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableRecordReader.java create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReader.java create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReaderFactory.java create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/StreamRecordReader.java create mode 100644 server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/TextRecordReader.java delete mode 100644 server/pxf-service/src/test/java/org/greenplum/pxf/service/BridgeInputBuilderTest.java create mode 100644 server/pxf-service/src/test/java/org/greenplum/pxf/service/GPDataGenerator.java create mode 100644 
server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/RecordReaderFactoryTest.java create mode 100644 server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/StreamRecordReaderTest.java create mode 100644 server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/TextRecordReaderTest.java create mode 100644 server/pxf-service/src/test/resources/data/README.md create mode 100644 server/pxf-service/src/test/resources/data/sample_data.csv create mode 100644 server/pxf-service/src/test/resources/data/sample_data.sql create mode 100644 server/pxf-service/src/test/resources/data/sample_data.txt create mode 100644 server/pxf-service/src/test/resources/data/sample_data_pipe.csv diff --git a/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java b/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java index efa5ab22ed..c389ea5b6b 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java @@ -15,6 +15,7 @@ import jsystem.framework.report.Reporter; import org.greenplum.pxf.automation.structures.tables.basic.Table; +import org.greenplum.pxf.automation.structures.tables.pxf.ExternalTable; import org.postgresql.util.PSQLException; import org.greenplum.pxf.automation.utils.exception.ExceptionUtils; @@ -158,7 +159,31 @@ public void insertData(Table source, Table target) throws Exception { dataStringBuilder.append(","); } } - runQuery("INSERT INTO " + target.getName() + " VALUES " + dataStringBuilder); + insertData(dataStringBuilder.toString(), target); + } + + /** + * Inserts data from the provided string into the target Table. The string is expected to contain data + * in SQL format that follows the 'INSERT INTO [table] VALUES ' clause. 
+ * + * @param data string containing data to insert + * @param target table to insert data into, can be an internal, an external or a foreign table + * @throws Exception is operation fails + */ + public void insertData(String data, Table target) throws Exception { + if (!data.startsWith("(")) { + data = "(" + data; + } + if (!data.endsWith(")")) { + data = data + ")"; + } + + String query = "INSERT INTO " + target.getName() + " VALUES " + data; + if (target instanceof ExternalTable) { + runQueryInsertIntoExternalTable(query); + } else { + runQuery(query); + } } @Override @@ -191,6 +216,16 @@ public long runQueryTiming(String query) throws Exception { return System.currentTimeMillis() - startTimeInMillis; } + /** + * Run query that inserts data into an external or a foreign table and ignores a warning about not being able to + * analyze a foreign table (if applicable) because PXF FDW does not yet support analyzing foreign tables. + * @param query query to run + * @throws Exception + */ + protected void runQueryInsertIntoExternalTable(String query) throws Exception { + runQueryWithExpectedWarning(query, ".* --- cannot analyze this foreign table", true, true); + } + /** * Run query which expected to get warning in execution and match it to expected one. 
* diff --git a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java index 8f3353ece0..b7cf7c66e5 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java @@ -5,6 +5,7 @@ import org.greenplum.pxf.automation.components.common.DbSystemObject; import org.greenplum.pxf.automation.components.common.ShellSystemObject; import org.greenplum.pxf.automation.structures.tables.basic.Table; +import org.greenplum.pxf.automation.structures.tables.pxf.ExternalTable; import org.greenplum.pxf.automation.utils.jsystem.report.ReportUtils; import org.greenplum.pxf.automation.utils.system.FDWUtils; import org.springframework.util.Assert; @@ -106,16 +107,94 @@ private void initPort() { /** * Copies data from source table into target table - * @param source - * @param target - * @throws Exception + * @param source source table + * @param target target table + * @throws Exception if the operation fails + */ + public void copyData(Table source, Table target) throws Exception { + + copyData(source, target, false); + } + + /** + * Copies data from source table into target table + * @param sourceName name of the source table + * @param target target table + * @throws Exception if the operation fails */ - public void copyData(Table source, Table target) throws Exception { + public void copyData(String sourceName, Table target) throws Exception { + copyData(sourceName, target, false); + } + + /** + * Copies data from source table into target table + * @param sourceName name of the source table + * @param target target table + * @param columns columns to select from the source table, if null then all columns will be selected + * @throws Exception if the operation fails + */ + public void copyData(String sourceName, Table target, String[] columns) throws Exception { + + 
copyData(sourceName, target, columns,false, null); + } + + /** + * Copies data from source table into target table + * @param sourceName name of the source table + * @param target target table + * @param columns columns to select from the source table, if null then all columns will be selected + * @param context extra context to add before the SQL query + * @throws Exception if the operation fails + */ + public void copyData(String sourceName, Table target, String[] columns, String context) throws Exception { + + copyData(sourceName, target, columns, false, context); + } + + /** + * Copies data from source table into target table + * @param source source table + * @param target target table + * @param ignoreFail whether to ignore any failures + * @throws Exception if the operation fails + */ + public void copyData(Table source, Table target, boolean ignoreFail) throws Exception { + copyData(source.getName(), target, ignoreFail); + } + + /** + * Copies data from source table into target table + * @param sourceName name of the source table + * @param target target table + * @param ignoreFail whether to ignore any failures + * @throws Exception if the operation fails + */ + public void copyData(String sourceName, Table target, boolean ignoreFail) throws Exception { + copyData(sourceName, target, null, ignoreFail, null); + } - runQuery("INSERT INTO " + target.getName() + " SELECT * FROM " - + source.getName()); - } + /** + * Copies data from the source table into the target table with a possibility of selecting specific columns. + * If the columns are specified, the schema of the target table should correspond to the projection achieved + * by specifying the columns. If the columns are not provided, all columns are selected from the source table. 
+ * @param sourceName name of the source table + * @param target target table + * @param columns columns to select from the source table, if null then all columns will be selected + * @param ignoreFail whether to ignore any failures + * @param context extra context to add before the SQL query + * @throws Exception if the operation fails + */ + public void copyData(String sourceName, Table target, String[] columns, boolean ignoreFail, String context) throws Exception { + String columnList = (columns == null || columns.length == 0) ? "*" : String.join(",", columns); + String query = String.format("%sINSERT INTO %s SELECT %s FROM %s", + StringUtils.isBlank(context) ? "" : context + "; " ,target.getName(), columnList, sourceName); + if (target instanceof ExternalTable) { + runQueryInsertIntoExternalTable(query); + } else { + runQuery(query, ignoreFail, false); + } + } public void connectToDataBase(String dbName) throws Exception { super.close(); @@ -184,6 +263,8 @@ private void createForeignServers(boolean ignoreFail) throws Exception { "default_adl", "default_wasbs", "s3_s3", + "s3-invalid_s3", + "s3-non-existent_s3", "hdfs-non-secure_hdfs", "hdfs-secure_hdfs", "hdfs-ipa_hdfs", @@ -301,29 +382,20 @@ public void copyFromStdin(Table from, Table to, String delim, boolean csv) throw } StringBuilder dataStringBuilder = new StringBuilder(); - List> data = from.getData(); - for (int i = 0; i < data.size(); i++) { - List row = data.get(i); - for (int j = 0; j < row.size(); j++) { - dataStringBuilder.append(row.get(j)); - if (j != row.size() - 1) { dataStringBuilder.append(delimeter); } } - dataStringBuilder.append("\n"); - } - dataStringBuilder.append("\\."); - copy(to.getName(), "STDIN", dataStringBuilder.toString(), delim, null, csv); + copyWithOptionalCTAS("STDIN", to, dataStringBuilder.toString(), delim, null, csv); } /** @@ -338,7 +410,21 @@ public void copyFromStdin(Table from, Table to, String delim, boolean csv) throw public void copyFromFile(Table to, File path, 
String delim, boolean csv) throws Exception { String from = "'" + path.getAbsolutePath() + "'"; copyLocalFileToRemoteGpdb(from); - copy(to.getName(), from, null, delim, null, csv); + copyWithOptionalCTAS(from, to, null, delim, null, csv); + } + + private void copyWithOptionalCTAS(String from, Table to, String dataToCopy, String delim, String nullChar, boolean csv) throws Exception { + // COPY TO is not supported in PXF FDW with GP6, so we will have to do a workaround by + // creating a native table, copying data from the file into it and then performing a CTAS into the foreign table + if (FDWUtils.useFDW && getVersion() < 7) { + Table nativeTable = createTableLike(to.getName() + "_native", to); + // copy data into the native table + copy(nativeTable.getName(), from, dataToCopy, delim, null, csv); + // CTAS into the foreign table + copyData(nativeTable, to, true); + } else { + copy(to.getName(), from, dataToCopy, delim, null, csv); + } } private void copyLocalFileToRemoteGpdb(String from) throws Exception { @@ -498,4 +584,25 @@ SELECT COUNT(*) FROM pg_catalog.pg_user_mapping um return count > 0; } + /** + * Create a table like the other table, only schema / distribution is copied, not the data. 
+ * @param name name of table to create + * @param source source table + * @return table that got created + * @throws Exception if the operation fails + */ + private Table createTableLike(String name, Table source) throws Exception { + Table table = new Table(name, source.getFields()); + String[] distributionFields = source.getDistributionFields(); + if (distributionFields != null && distributionFields.length > 0) { + table.setDistributionFields(distributionFields); + } else { + // set distribution field as the first one so that PSQL does not issue a warning + // extract the name of the first table field and type, split off the type that follows the name after whitespace + table.setDistributionFields(new String[]{table.getFields()[0].split("\\s+")[0]}); + } + createTableAndVerify(table); + return table; + } + } diff --git a/automation/src/main/java/org/greenplum/pxf/automation/components/hdfs/Hdfs.java b/automation/src/main/java/org/greenplum/pxf/automation/components/hdfs/Hdfs.java index 5038d3369d..80a25cc2e4 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/components/hdfs/Hdfs.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/components/hdfs/Hdfs.java @@ -37,6 +37,7 @@ import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; @@ -51,6 +52,7 @@ import java.util.List; import java.util.UUID; +import static java.lang.Thread.sleep; import static org.testng.Assert.assertEquals; /** @@ -237,10 +239,37 @@ private Path getDatapath(String path) { @Override public ArrayList list(String path) throws Exception { ReportUtils.startLevel(report, getClass(), "List From " + path); - RemoteIterator list = fs.listFiles(getDatapath(path), true); + RemoteIterator list = null; + // build resilience to long delays for cloud and NFS mounts in CI + Exception savedException = null; + int attempt = 0; + 
while (list == null && attempt++ < 120) { + try { + list = fs.listFiles(getDatapath(path), true); + } catch (FileNotFoundException e) { + savedException = e; + ReportUtils.report(report, getClass(), + String.format("Directory %s does not exist, attempt %d, will retry in 1 sec", path, attempt)); + sleep(1000); + } + } + if (list == null) { + ReportUtils.report(report, getClass(), + String.format("Directory %s does not exist, max attempts exceeded, throwing exception", path)); + throw savedException; + + } ArrayList filesList = new ArrayList<>(); while (list.hasNext()) { - filesList.add(list.next().getPath().toString()); + String pathToFile = list.next().getPath().toString(); + // make sure the file is available, saw flakes on Cloud that files were not available even if they were listed + int fileAttempt = 0; + while (!doesFileExist(pathToFile) && fileAttempt++ < 120) { + ReportUtils.report(report, getClass(), + String.format("File %s does not exist, attempt %d, will retry in 1 sec", pathToFile, fileAttempt)); + sleep(1000); + } + filesList.add(pathToFile); } ReportUtils.report(report, getClass(), filesList.toString()); ReportUtils.stopLevel(report); diff --git a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ExternalTable.java b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ExternalTable.java index 7e35999838..ff8b03d822 100755 --- a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ExternalTable.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ExternalTable.java @@ -180,32 +180,32 @@ protected void appendParameter(StringBuilder sBuilder, String parameter) { @Override public String constructCreateStmt() { - String createStatment = ""; + String createStatement = ""; - createStatment += createHeader(); - createStatment += createFields(); - createStatment += createLocation(); + createStatement += createHeader(); + createStatement += createFields(); 
+ createStatement += createLocation(); if (getFormat() != null) { - createStatment += " FORMAT '" + getFormat() + "'"; + createStatement += " FORMAT '" + getFormat() + "'"; } if (getFormatter() != null) { String formatterOption = isFormatterMixedCase() ? "FoRmAtTeR" : "formatter"; - createStatment += String.format(" (%s='%s'", formatterOption, getFormatter()); + createStatement += String.format(" (%s='%s'", formatterOption, getFormatter()); if (formatterOptions.size() > 0) { - createStatment += ", "; - createStatment += formatterOptions.stream().collect(Collectors.joining(", ")); + createStatement += ", "; + createStatement += formatterOptions.stream().collect(Collectors.joining(", ")); } - createStatment += ")"; + createStatement += ")"; } boolean hasDelimiterOrEscapeOrNewLine = getDelimiter() != null || getEscape() != null || getNewLine() != null; if (hasDelimiterOrEscapeOrNewLine) { - createStatment += " ("; + createStatement += " ("; } if (getDelimiter() != null) { @@ -215,7 +215,7 @@ public String constructCreateStmt() { if (!parsedDelimiter.startsWith("E")) { parsedDelimiter = "'" + parsedDelimiter + "'"; } - createStatment += " DELIMITER " + parsedDelimiter ; + createStatement += " DELIMITER " + parsedDelimiter ; } if (getEscape() != null) { @@ -225,34 +225,35 @@ public String constructCreateStmt() { if (!parsedEscapeCharacter.startsWith("E")) { parsedEscapeCharacter = "'" + parsedEscapeCharacter + "'"; } - createStatment += " ESCAPE " + parsedEscapeCharacter; + createStatement += " ESCAPE " + parsedEscapeCharacter; } if (getNewLine() != null) { String newLineCharacter = getNewLine(); - createStatment += " NEWLINE '" + newLineCharacter + "'"; + createStatement += " NEWLINE '" + newLineCharacter + "'"; } if (hasDelimiterOrEscapeOrNewLine) { - createStatment += ")"; + createStatement += ")"; } if (getEncoding() != null) { - createStatment += " ENCODING '" + getEncoding() + "'"; + createStatement += " ENCODING '" + getEncoding() + "'"; } if (getErrorTable() != 
null) { - createStatment += " LOG ERRORS"; + createStatement += " LOG ERRORS"; } if (getSegmentRejectLimit() > 0) { - createStatment += " SEGMENT REJECT LIMIT " + createStatement += " SEGMENT REJECT LIMIT " + getSegmentRejectLimit() + " " + getSegmentRejectLimitType(); } - return createStatment; + createStatement += distribution(); + return createStatement; } public String getHost() { diff --git a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java index 0422558d51..ccf6a3231c 100644 --- a/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java +++ b/automation/src/main/java/org/greenplum/pxf/automation/structures/tables/pxf/ForeignTable.java @@ -39,6 +39,7 @@ protected String createServer() { String[] serverParameters = StringUtils.defaultIfBlank(getServer(), "default").split("="); // getServer() might return a string "server=<..>", strip the prefix int index = serverParameters.length > 1 ? 1 : 0; + // foreign server names will have underscores instead of dashes return String.format(" SERVER %s_%s", serverParameters[index].replace("-","_"), getProtocol()); } @@ -53,6 +54,9 @@ protected String createOptions() { appendOption(joiner, "format", formatOption); } + if (getCompressionCodec() != null) { + appendOption(joiner, "compression_codec", getCompressionCodec()); + } // process F/A/R as options, they are used in tests to test column projection / predicate pushdown if (getFragmenter() != null) { appendOption(joiner, "fragmenter", getFragmenter()); @@ -184,7 +188,8 @@ private String getProtocol() { private String[] getProfileParts() { if (getProfile() == null) { - // TODO: what will we do with tests that set F/A/R directly without a profile ? 
+ // tests that set F/A/R directly without a profile need to be registered to 'test_fdw' created for testing + // specifically that defines pseudo protocol 'test' throw new IllegalStateException("Cannot create foreign table when profile is not specified"); } return getProfile().split(":"); diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsReadableAvroTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsReadableAvroTest.java index 07c8ce59a6..db0f79189f 100755 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsReadableAvroTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsReadableAvroTest.java @@ -1,16 +1,13 @@ package org.greenplum.pxf.automation.features.avro; -import annotations.FailsWithFDW; import annotations.WorksWithFDW; import org.greenplum.pxf.automation.components.cluster.PhdCluster; import org.greenplum.pxf.automation.datapreparer.CustomAvroPreparer; import org.greenplum.pxf.automation.features.BaseFeature; import org.greenplum.pxf.automation.fileformats.IAvroSchema; import org.greenplum.pxf.automation.structures.tables.basic.Table; -import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; import org.greenplum.pxf.automation.utils.fileformats.FileFormatsUtils; -import org.greenplum.pxf.automation.utils.system.ProtocolEnum; import org.greenplum.pxf.automation.utils.system.ProtocolUtils; import org.testng.annotations.Test; diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsWritableAvroTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsWritableAvroTest.java index 4aecd6df71..dd042f748a 100644 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsWritableAvroTest.java +++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/avro/HdfsWritableAvroTest.java @@ -1,11 +1,10 @@ package org.greenplum.pxf.automation.features.avro; -import annotations.FailsWithFDW; +import annotations.WorksWithFDW; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import org.greenplum.pxf.automation.features.BaseFeature; -import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable; -import org.greenplum.pxf.automation.structures.tables.pxf.WritableExternalTable; +import org.greenplum.pxf.automation.features.BaseWritableFeature; +import org.greenplum.pxf.automation.structures.tables.basic.Table; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; import org.greenplum.pxf.automation.utils.jsystem.report.ReportUtils; import org.greenplum.pxf.automation.utils.system.ProtocolEnum; @@ -24,10 +23,9 @@ import static java.lang.Thread.sleep; import static org.junit.Assert.assertEquals; -@FailsWithFDW -public class HdfsWritableAvroTest extends BaseFeature { +@WorksWithFDW +public class HdfsWritableAvroTest extends BaseWritableFeature { - private ReadableExternalTable readableExternalTable; private ArrayList filesToDelete; private static final String[] AVRO_PRIMITIVE_WRITABLE_TABLE_COLS = new String[]{ "type_int int", @@ -105,8 +103,7 @@ public class HdfsWritableAvroTest extends BaseFeature { "type_numeric_array TEXT[]", "type_string_array TEXT[]" }; - private String gpdbTable; - private String hdfsPath; + private String tableNamePrefix; private String publicStage; private String resourcePath; private String fullTestPath; @@ -115,7 +112,7 @@ public class HdfsWritableAvroTest extends BaseFeature { @Override public void beforeClass() throws Exception { // path for storing data on HDFS (for processing by PXF) - hdfsPath = hdfs.getWorkingDirectory() + "/writableAvro/"; + hdfsWritePath = hdfs.getWorkingDirectory() + "/writableAvro/"; String absolutePath = 
Objects.requireNonNull(getClass().getClassLoader().getResource("data")).getPath(); resourcePath = absolutePath + "/avro/"; @@ -130,17 +127,16 @@ public void beforeMethod() throws Exception { @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void generateSchemaPrimitive() throws Exception { - gpdbTable = "writable_avro_primitive_generate_schema"; - fullTestPath = hdfsPath + "generate_schema_primitive_types"; - prepareWritableExternalTable(gpdbTable, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); - exTable.setUserParameters(new String[]{"COMPRESSION_CODEC=xz"}); + tableNamePrefix = "writable_avro_primitive_generate_schema"; + fullTestPath = hdfsWritePath + "generate_schema_primitive_types"; + prepareWritableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); + writableExTable.setUserParameters(new String[]{"COMPRESSION_CODEC=xz"}); + gpdb.createTableAndVerify(writableExTable); - prepareReadableExternalTable(gpdbTable, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); + prepareReadableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); + gpdb.createTableAndVerify(readableExTable); - gpdb.createTableAndVerify(readableExternalTable); - gpdb.createTableAndVerify(exTable); - - insertPrimitives(gpdbTable); + insertPrimitives(writableExTable); publicStage += "generateSchemaPrimitive/"; // fetch all the segment-generated avro files and make them into json records @@ -153,17 +149,16 @@ public void generateSchemaPrimitive() throws Exception { @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void generateSchemaPrimitive_withNoCompression() throws Exception { - gpdbTable = "writable_avro_primitive_no_compression"; - fullTestPath = hdfsPath + "generate_schema_primitive_types_with_no_compression"; - prepareWritableExternalTable(gpdbTable, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); - exTable.setUserParameters(new String[]{"COMPRESSION_CODEC=uncompressed"}); - - 
prepareReadableExternalTable(gpdbTable, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); + tableNamePrefix = "writable_avro_primitive_no_compression"; + fullTestPath = hdfsWritePath + "generate_schema_primitive_types_with_no_compression"; + prepareWritableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); + writableExTable.setUserParameters(new String[]{"COMPRESSION_CODEC=uncompressed"}); + gpdb.createTableAndVerify(writableExTable); - gpdb.createTableAndVerify(readableExternalTable); - gpdb.createTableAndVerify(exTable); + prepareReadableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertPrimitives(gpdbTable); + insertPrimitives(writableExTable); publicStage += "generateSchemaPrimitive_withNoCompression/"; // fetch all the segment-generated avro files and make them into json records @@ -176,18 +171,17 @@ public void generateSchemaPrimitive_withNoCompression() throws Exception { @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void generateSchemaComplex() throws Exception { - gpdbTable = "writable_avro_complex_generate_schema"; + tableNamePrefix = "writable_avro_complex_generate_schema"; createComplexTypes(); - fullTestPath = hdfsPath + "generate_schema_complex_types"; - prepareWritableExternalTable(gpdbTable, AVRO_COMPLEX_TABLE_COLS_WRITABLE, fullTestPath); - exTable.setUserParameters(new String[]{"COMPRESSION_CODEC=bzip2"}); - - prepareReadableExternalTable(gpdbTable, AVRO_COMPLEX_TABLE_COLS_W_ARRAYS_READABLE, fullTestPath); + fullTestPath = hdfsWritePath + "generate_schema_complex_types"; + prepareWritableExternalTable(tableNamePrefix, AVRO_COMPLEX_TABLE_COLS_WRITABLE, fullTestPath); + writableExTable.setUserParameters(new String[]{"COMPRESSION_CODEC=bzip2"}); + gpdb.createTableAndVerify(writableExTable); - gpdb.createTableAndVerify(readableExternalTable); - gpdb.createTableAndVerify(exTable); + 
prepareReadableExternalTable(tableNamePrefix, AVRO_COMPLEX_TABLE_COLS_W_ARRAYS_READABLE, fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertComplex(gpdbTable); + insertComplex(writableExTable); publicStage += "generateSchemaComplex/"; // fetch all the segment-generated avro files and make them into json records @@ -200,24 +194,24 @@ public void generateSchemaComplex() throws Exception { @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void userProvidedSchemaFileOnHcfsPrimitive() throws Exception { - gpdbTable = "writable_avro_primitive_user_provided_schema_on_hcfs"; - fullTestPath = hdfsPath + "primitive_user_provided_schema_on_hcfs"; - prepareWritableExternalTable(gpdbTable, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); - exTable.setUserParameters(new String[]{"COMPRESSION_CODEC=snappy"}); + tableNamePrefix = "writable_avro_primitive_user_provided_schema_on_hcfs"; + fullTestPath = hdfsWritePath + "primitive_user_provided_schema_on_hcfs"; + prepareWritableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath); + writableExTable.setUserParameters(new String[]{"COMPRESSION_CODEC=snappy"}); - prepareReadableExternalTable(gpdbTable, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); - gpdb.createTableAndVerify(readableExternalTable); - - String schemaPath = hdfsPath.replaceFirst("/$", "_schema/primitives_no_union.avsc"); + String schemaPath = hdfsWritePath.replaceFirst("/$", "_schema/primitives_no_union.avsc"); // copy a schema file to HCFS that has no UNION types, just the raw underlying types. 
// the Avro files should thus be different from those without user-provided schema hdfs.copyFromLocal(resourcePath + "primitives_no_union.avsc", schemaPath); schemaPath = "/" + schemaPath; - exTable.setExternalDataSchema(schemaPath); - gpdb.createTableAndVerify(exTable); + writableExTable.setExternalDataSchema(schemaPath); + gpdb.createTableAndVerify(writableExTable); + + prepareReadableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertPrimitives(gpdbTable); + insertPrimitives(writableExTable); publicStage += "userProvidedSchemaFileOnHcfsPrimitive/"; // fetch all the segment-generated avro files and make them into json records @@ -231,17 +225,12 @@ public void userProvidedSchemaFileOnHcfsPrimitive() throws Exception { @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void userProvidedSchemaFileOnClasspathComplexTypesAsText() throws Exception { createComplexTypes(); - gpdbTable = "writable_avro_complex_user_schema_on_classpath"; - fullTestPath = hdfsPath + "complex_user_schema_on_classpath"; - prepareWritableExternalTable(gpdbTable, + tableNamePrefix = "writable_avro_complex_user_schema_on_classpath"; + fullTestPath = hdfsWritePath + "complex_user_schema_on_classpath"; + prepareWritableExternalTable(tableNamePrefix, AVRO_COMPLEX_TABLE_COLS_WRITABLE, fullTestPath); - prepareReadableExternalTable(gpdbTable, - AVRO_COMPLEX_TABLE_COLS_AS_TEXT_READABLE, - fullTestPath); - gpdb.createTableAndVerify(readableExternalTable); - // copy a schema file to PXF's classpath on cluster that has no UNION types, just the raw underlying types. 
// the Avro files should thus be different from those without user-provided schema // this file is generated using Avro tools: http://avro.apache.org/releases.html @@ -253,10 +242,15 @@ public void userProvidedSchemaFileOnClasspathComplexTypesAsText() throws Excepti cluster.copyFileToNodes(new File(resourcePath + "complex_no_union.avro").getAbsolutePath(), cluster.getPxfConfLocation(), false, false); - exTable.setExternalDataSchema("complex_no_union.avro"); - gpdb.createTableAndVerify(exTable); + writableExTable.setExternalDataSchema("complex_no_union.avro"); + gpdb.createTableAndVerify(writableExTable); + + prepareReadableExternalTable(tableNamePrefix, + AVRO_COMPLEX_TABLE_COLS_AS_TEXT_READABLE, + fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertComplex(gpdbTable); + insertComplex(writableExTable); publicStage += "userProvidedSchemaFileOnClasspathComplex/"; // fetch all the segment-generated avro files and make them into json records @@ -269,24 +263,24 @@ public void userProvidedSchemaFileOnClasspathComplexTypesAsText() throws Excepti @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void userProvidedSchemaFileGPDBArraysAsAvroArraysWithNulls() throws Exception { - gpdbTable = "writable_avro_array_user_schema_w_nulls"; - fullTestPath = hdfsPath + "array_user_schema_w_nulls"; - prepareWritableExternalTable(gpdbTable, + tableNamePrefix = "writable_avro_array_user_schema_w_nulls"; + fullTestPath = hdfsWritePath + "array_user_schema_w_nulls"; + prepareWritableExternalTable(tableNamePrefix, AVRO_ARRAY_TABLE_COLS_WRITABLE, fullTestPath); - prepareReadableExternalTable(gpdbTable, - AVRO_ARRAY_TABLE_COLS_READABLE, - fullTestPath); - gpdb.createTableAndVerify(readableExternalTable); - cluster.copyFileToNodes(new File(resourcePath + "array_with_nulls.avsc").getAbsolutePath(), cluster.getPxfConfLocation(), false, false); - exTable.setExternalDataSchema("array_with_nulls.avsc"); - gpdb.createTableAndVerify(exTable); + 
writableExTable.setExternalDataSchema("array_with_nulls.avsc"); + gpdb.createTableAndVerify(writableExTable); + + prepareReadableExternalTable(tableNamePrefix, + AVRO_ARRAY_TABLE_COLS_READABLE, + fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertComplexNullArrays(gpdbTable); + insertComplexNullArrays(writableExTable); publicStage += "userProvidedSchemaArrayWithNullsComplex/"; // fetch all the segment-generated avro files and make them into json records @@ -299,17 +293,16 @@ public void userProvidedSchemaFileGPDBArraysAsAvroArraysWithNulls() throws Excep @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void generateSchemaWithNullValuesComplex() throws Exception { - gpdbTable = "writable_avro_null_values"; + tableNamePrefix = "writable_avro_null_values"; createComplexTypes(); - fullTestPath = hdfsPath + "null_values"; - prepareWritableExternalTable(gpdbTable, AVRO_COMPLEX_TABLE_COLS_WRITABLE, fullTestPath); - - prepareReadableExternalTable(gpdbTable, AVRO_COMPLEX_TABLE_COLS_W_ARRAYS_READABLE, fullTestPath); + fullTestPath = hdfsWritePath + "null_values"; + prepareWritableExternalTable(tableNamePrefix, AVRO_COMPLEX_TABLE_COLS_WRITABLE, fullTestPath); + gpdb.createTableAndVerify(writableExTable); - gpdb.createTableAndVerify(readableExternalTable); - gpdb.createTableAndVerify(exTable); + prepareReadableExternalTable(tableNamePrefix, AVRO_COMPLEX_TABLE_COLS_W_ARRAYS_READABLE, fullTestPath); + gpdb.createTableAndVerify(readableExTable); - insertComplexWithNulls(gpdbTable); + insertComplexWithNulls(writableExTable); publicStage += "nullValues/"; // fetch all the segment-generated avro files and make them into json records @@ -322,10 +315,10 @@ public void generateSchemaWithNullValuesComplex() throws Exception { @Override protected void afterMethod() throws Exception { - super.afterMethod(); if (ProtocolUtils.getPxfTestKeepData().equals("true")) { return; } + super.afterMethod(); if (filesToDelete != null) { for (File file : filesToDelete) 
{ if (!file.delete()) { @@ -349,62 +342,62 @@ private void dropComplexTypes() throws Exception { } } - private void insertPrimitives(String exTable) throws Exception { - gpdb.runQuery("INSERT INTO " + exTable + "_writable " + "SELECT " + - "i, " + // type_int - "i, " + // type_smallint - "i*100000000000, " + // type_long - "i+1.0001, " + // type_float - "i*100000.0001, " + // type_double - "'row_' || i::varchar(255), " + // type_string - "('bytes for ' || i::varchar(255))::bytea, " + // type_bytes - "CASE WHEN (i%2) = 0 THEN TRUE ELSE FALSE END, " + // type_boolean - "'character row ' || i::char(3)," + // type_character - "'character varying row ' || i::varchar(3)" + // type_varchar - "from generate_series(1, 100) s(i);"); + private void insertPrimitives(Table exTable) throws Exception { + gpdb.copyData("generate_series(1, 100) s(i)", exTable, new String[]{ + "i", // type_int + "i", // type_smallint + "i*100000000000", // type_long + "i+1.0001", // type_float + "i*100000.0001", // type_double + "'row_' || i::varchar(255)", // type_string + "('bytes for ' || i::varchar(255))::bytea", // type_bytes + "CASE WHEN (i%2) = 0 THEN TRUE ELSE FALSE END", // type_boolean + "'character row ' || i::char(3)", // type_character + "'character varying row ' || i::varchar(3)" // type_varchar + }); } - private void insertComplex(String gpdbTable) throws Exception { - gpdb.runQuery("SET TIMEZONE=-7;" + "INSERT INTO " + gpdbTable + "_writable " + " SELECT " + - "i, " + - "('(' || CASE WHEN (i%2) = 0 THEN FALSE ELSE TRUE END || ',' || (i*2)::VARCHAR(255) || ')')::struct, " + - "CASE WHEN (i%2) = 0 THEN 'sad' ELSE 'happy' END::mood," + - "('{' || i::VARCHAR(255) || ',' || (i*10)::VARCHAR(255) || ',' || (i*100)::VARCHAR(255) || '}')::BIGINT[], " + - "('{' || (i*1.0001)::VARCHAR(255) || ',' || ((i*10.00001)*10)::VARCHAR(255) || ',' || ((i*100.000001)*100)::VARCHAR(255) || '}')::NUMERIC(8,1)[], " + - "('{\"item ' || ((i-1)*10)::VARCHAR(255) || '\",\"item ' || (i*10)::VARCHAR(255) || 
'\",\"item ' || ((i+1)*10)::VARCHAR(255) || '\"}')::TEXT[], " + - "DATE '2001-09-28' + i, " + - "TIME '00:00:00' + (i::VARCHAR(255) || ' seconds')::interval, " + - "TIMESTAMP '2001-09-28 01:00' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval, " + - "TIMESTAMP WITH TIME ZONE '2001-09-28 01:00-07' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval " + - "FROM generate_series(1, 100) s(i);"); + private void insertComplex(Table exTable) throws Exception { + gpdb.copyData("generate_series(1, 100) s(i)", exTable, new String[]{ + "i", + "('(' || CASE WHEN (i%2) = 0 THEN FALSE ELSE TRUE END || ',' || (i*2)::VARCHAR(255) || ')')::struct", + "CASE WHEN (i%2) = 0 THEN 'sad' ELSE 'happy' END::mood", + "('{' || i::VARCHAR(255) || ',' || (i*10)::VARCHAR(255) || ',' || (i*100)::VARCHAR(255) || '}')::BIGINT[]", + "('{' || (i*1.0001)::VARCHAR(255) || ',' || ((i*10.00001)*10)::VARCHAR(255) || ',' || ((i*100.000001)*100)::VARCHAR(255) || '}')::NUMERIC(8,1)[]", + "('{\"item ' || ((i-1)*10)::VARCHAR(255) || '\",\"item ' || (i*10)::VARCHAR(255) || '\",\"item ' || ((i+1)*10)::VARCHAR(255) || '\"}')::TEXT[]", + "DATE '2001-09-28' + i", + "TIME '00:00:00' + (i::VARCHAR(255) || ' seconds')::interval", + "TIMESTAMP '2001-09-28 01:00' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval", + "TIMESTAMP WITH TIME ZONE '2001-09-28 01:00-07' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval" + }, "SET TIMEZONE=-7"); } - private void insertComplexWithNulls(String gpdbTable) throws Exception { - gpdb.runQuery("SET TIMEZONE=-7;" + "INSERT INTO " + gpdbTable + "_writable " + " SELECT " + - "i, " + - "('(' || CASE WHEN (i%2) = 0 THEN FALSE ELSE TRUE END || ', ' || (i*2)::VARCHAR(255) || ')')::struct, " + - "CASE WHEN (i%3) = 0 THEN 'sad' WHEN (i%2) = 0 THEN 'happy' ELSE NULL END::mood, " + - "('{' || i::VARCHAR(255) || ',' || (i*10)::VARCHAR(255) || ',' || 
(i*100)::VARCHAR(255) || '}')::BIGINT[], " + - "('{' || (i*1.0001)::VARCHAR(255) || ',' || ((i*10.00001)*10)::VARCHAR(255) || ',' || ((i*100.000001)*100)::VARCHAR(255) || '}')::NUMERIC(8,1)[], " + - "('{\"item ' || ((i-1)*10)::VARCHAR(255) || '\",\"item ' || (i*10)::VARCHAR(255) || '\",\"item ' || ((i+1)*10)::VARCHAR(255) || '\"}')::TEXT[], " + - "DATE '2001-09-28' + i, " + - "TIME '00:00:00' + (i::VARCHAR(255) || ' seconds')::interval, " + - "TIMESTAMP '2001-09-28 01:00' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval, " + - "TIMESTAMP WITH TIME ZONE '2001-09-28 01:00-07' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval " + - "FROM generate_series(1, 100) s(i);"); + private void insertComplexWithNulls(Table exTable) throws Exception { + gpdb.copyData("generate_series(1, 100) s(i)", exTable, new String[]{ + "i", + "('(' || CASE WHEN (i%2) = 0 THEN FALSE ELSE TRUE END || ', ' || (i*2)::VARCHAR(255) || ')')::struct", + "CASE WHEN (i%3) = 0 THEN 'sad' WHEN (i%2) = 0 THEN 'happy' ELSE NULL END::mood", + "('{' || i::VARCHAR(255) || ',' || (i*10)::VARCHAR(255) || ',' || (i*100)::VARCHAR(255) || '}')::BIGINT[]", + "('{' || (i*1.0001)::VARCHAR(255) || ',' || ((i*10.00001)*10)::VARCHAR(255) || ',' || ((i*100.000001)*100)::VARCHAR(255) || '}')::NUMERIC(8,1)[]", + "('{\"item ' || ((i-1)*10)::VARCHAR(255) || '\",\"item ' || (i*10)::VARCHAR(255) || '\",\"item ' || ((i+1)*10)::VARCHAR(255) || '\"}')::TEXT[]", + "DATE '2001-09-28' + i", + "TIME '00:00:00' + (i::VARCHAR(255) || ' seconds')::interval", + "TIMESTAMP '2001-09-28 01:00' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval", + "TIMESTAMP WITH TIME ZONE '2001-09-28 01:00-07' + (i::VARCHAR(255) || ' days')::interval + (i::VARCHAR(255) || ' hours')::interval" + }, "SET TIMEZONE=-7"); } private void - insertComplexNullArrays(String gpdbTable) throws Exception { - gpdb.runQuery("INSERT INTO " + gpdbTable + "_writable " + " 
VALUES " + - "(1, NULL, '{1.0001,10.00001,100.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')," + - "(2, '{2,20,200}', NULL, '{\"item 0\",\"item 10\",\"item 20\"}')," + - "(3, '{3,30,300}', '{3.0001,30.00001,300.000001}', NULL )," + - "(4, '{NULL,40,400}', '{4.0001,40.00001,400.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')," + - "(5, '{5,50,500}', '{5.0001,NULL,500.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')," + - "(6, '{6,60,600}', '{6.0001,60.00001,600.000001}', '{\"item 0\",\"item 10\",NULL}' )," + - "(7, '{7,70,700}', '{7.0001,70.00001,700.000001}', '{\"item 0\",\"item 10\",\"item 20\"}');" - ); + insertComplexNullArrays(Table exTable) throws Exception { + String data = String.join(",", new String[] { + "(1, NULL, '{1.0001,10.00001,100.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')", + "(2, '{2,20,200}', NULL, '{\"item 0\",\"item 10\",\"item 20\"}')", + "(3, '{3,30,300}', '{3.0001,30.00001,300.000001}', NULL )", + "(4, '{NULL,40,400}', '{4.0001,40.00001,400.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')", + "(5, '{5,50,500}', '{5.0001,NULL,500.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')", + "(6, '{6,60,600}', '{6.0001,60.00001,600.000001}', '{\"item 0\",\"item 10\",NULL}' )", + "(7, '{7,70,700}', '{7.0001,70.00001,700.000001}', '{\"item 0\",\"item 10\",\"item 20\"}')"}); + gpdb.insertData(data, exTable); } private void fetchAndVerifyAvroHcfsFiles(String compareFile, String codec) throws Exception { @@ -415,7 +408,7 @@ private void fetchAndVerifyAvroHcfsFiles(String compareFile, String codec) throw addJsonNodesToMap(jsonToCompare, resourcePath + compareFile); // for HCFS on Cloud, wait a bit for async write in previous steps to finish - if (protocol != ProtocolEnum.HDFS && protocol != ProtocolEnum.FILE) { + if (protocol != ProtocolEnum.HDFS) { sleep(10000); } for (String srcPath : hdfs.list(fullTestPath)) { @@ -423,11 +416,6 @@ private void fetchAndVerifyAvroHcfsFiles(String compareFile, String codec) throw final String filePath = 
publicStage + fileName; filesToDelete.add(new File(filePath + ".avro")); filesToDelete.add(new File(publicStage + "." + fileName + ".avro.crc")); - // make sure the file is available, saw flakes on Cloud that listed files were not available - int attempts = 0; - while (!hdfs.doesFileExist(srcPath) && attempts++ < 20) { - sleep(1000); - } hdfs.copyToLocal(srcPath, filePath + ".avro"); sleep(250); hdfs.writeJsonFileFromAvro("file://" + filePath + ".avro", filePath + ".json"); @@ -481,12 +469,12 @@ private void addJsonNodesToMap(Map map, String filePath) { } private void prepareWritableExternalTable(String name, String[] fields, String path) { - exTable = TableFactory.getPxfHcfsWritableTable(name + "_writable", + writableExTable = TableFactory.getPxfHcfsWritableTable(name + "_writable", fields, path, hdfs.getBasePath(), "avro"); } private void prepareReadableExternalTable(String name, String[] fields, String path) { - readableExternalTable = TableFactory.getPxfHcfsReadableTable(name + "_readable", + readableExTable = TableFactory.getPxfHcfsReadableTable(name + "_readable", fields, path, hdfs.getBasePath(),"avro"); } } diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java index 4894439a8a..340229b31a 100644 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java @@ -1,5 +1,6 @@ package org.greenplum.pxf.automation.features.cloud; +import annotations.WorksWithFDW; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.greenplum.pxf.automation.components.hdfs.Hdfs; @@ -15,6 +16,7 @@ /** * Functional CloudAccess Test */ +@WorksWithFDW public class CloudAccessTest extends BaseFeature { private static final String PROTOCOL_S3 = "s3a://"; @@ -115,7 +117,7 @@ 
public void testCloudAccessOkWhenServerCredsNoConfigFileExists() throws Exceptio /* * The tests below are for the case where there's a Hadoop cluster configured under "default" server - * both without and with Kerberos security, testing that clud access works in presence of "default" server + * both without and with Kerberos security, testing that cloud access works in presence of "default" server */ @Test(groups = {"gpdb", "security"}) diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java index 1e6acf5bb8..651185a982 100644 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java @@ -1,10 +1,11 @@ package org.greenplum.pxf.automation.features.parquet; +import annotations.WorksWithFDW; import com.google.common.collect.Lists; import jsystem.framework.system.SystemManagerImpl; import org.apache.commons.lang.StringUtils; import org.greenplum.pxf.automation.components.hive.Hive; -import org.greenplum.pxf.automation.features.BaseFeature; +import org.greenplum.pxf.automation.features.BaseWritableFeature; import org.greenplum.pxf.automation.structures.tables.basic.Table; import org.greenplum.pxf.automation.structures.tables.hive.HiveExternalTable; import org.greenplum.pxf.automation.structures.tables.hive.HiveTable; @@ -23,7 +24,8 @@ import static java.lang.Thread.sleep; import static org.junit.Assert.assertEquals; -public class ParquetWriteTest extends BaseFeature { +@WorksWithFDW +public class ParquetWriteTest extends BaseWritableFeature { private static final String NUMERIC_TABLE = "numeric_precision"; private static final String NUMERIC_UNDEFINED_PRECISION_TABLE = "numeric_undefined_precision"; private static final String PXF_PARQUET_PRIMITIVE_TABLE = "pxf_parquet_primitive_types"; @@ 
-126,6 +128,12 @@ public class ParquetWriteTest extends BaseFeature { "id INTEGER", "tm_arr TIMESTAMP[]" }; + + private final String[] PARQUET_PRIMITIVE_COLUMN_NAMES = new String[]{ + "s1", "s2", "n1", "d1", "dc1", "tm", "f", "bg", "b", "tn", "vc1", "sml", "c1", "bin"}; + + private final String[] PARQUET_ARRAY_COLUMN_NAMES = new String[]{"id", "bool_arr", "smallint_arr", "int_arr", + "bigint_arr", "real_arr", "double_arr", "text_arr", "bytea_arr", "char_arr", "varchar_arr", "numeric_arr", "date_arr"}; private String hdfsPath; private ProtocolEnum protocol; private Hive hive; @@ -152,12 +160,12 @@ public void parquetWritePaddedChar() throws Exception { runWritePrimitivesScenario("pxf_parquet_write_padded_char", "pxf_parquet_read_padded_char", "parquet_write_padded_char", null); /* 2. Insert data with chars that need padding */ - gpdb.runQuery("INSERT INTO pxf_parquet_write_padded_char VALUES ('row25_char_needs_padding', 's_17', 11, 37, 0.123456789012345679, " + - "'2013-07-23 21:00:05', 7.7, 23456789, false, 11, 'abcde', 1100, 'a ', '1')"); - gpdb.runQuery("INSERT INTO pxf_parquet_write_padded_char VALUES ('row26_char_with_tab', 's_17', 11, 37, 0.123456789012345679, " + - "'2013-07-23 21:00:05', 7.7, 23456789, false, 11, 'abcde', 1100, e'b\\t ', '1')"); - gpdb.runQuery("INSERT INTO pxf_parquet_write_padded_char VALUES ('row27_char_with_newline', 's_17', 11, 37, 0.123456789012345679, " + - "'2013-07-23 21:00:05', 7.7, 23456789, false, 11, 'abcde', 1100, e'c\\n ', '1')"); + gpdb.insertData("('row25_char_needs_padding', 's_17', 11, 37, 0.123456789012345679, " + + "'2013-07-23 21:00:05', 7.7, 23456789, false, 11, 'abcde', 1100, 'a ', '1')", writableExTable); + gpdb.insertData("('row26_char_with_tab', 's_17', 11, 37, 0.123456789012345679, " + + "'2013-07-23 21:00:05', 7.7, 23456789, false, 11, 'abcde', 1100, e'b\\t ', '1')", writableExTable); + gpdb.insertData("('row27_char_with_newline', 's_17', 11, 37, 0.123456789012345679, " + + "'2013-07-23 21:00:05', 7.7, 23456789, 
false, 11, 'abcde', 1100, e'c\\n ', '1')", writableExTable); if (protocol != ProtocolEnum.HDFS && protocol != ProtocolEnum.FILE) { // for HCFS on Cloud, wait a bit for async write in previous steps to finish @@ -195,7 +203,7 @@ public void parquetWriteUndefinedPrecisionNumeric() throws Exception { String writableExternalTableName = "pxf_parquet_write_undefined_precision_numeric"; prepareNumericWritableExtTable(filePathName, fileName, writableExternalTableName, false, false); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT * FROM " + NUMERIC_UNDEFINED_PRECISION_TABLE); + gpdb.copyData(NUMERIC_UNDEFINED_PRECISION_TABLE, writableExTable); prepareReadableExternalTable("pxf_parquet_read_undefined_precision_numeric", UNDEFINED_PRECISION_NUMERIC, hdfsPath + fileName); @@ -220,7 +228,7 @@ public void parquetWriteNumericWithPrecisionAndScale() throws Exception { String writableExternalTableName = "pxf_parquet_write_numeric"; prepareNumericWritableExtTable(filePathName, fileName, writableExternalTableName, true, false); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT * FROM " + NUMERIC_TABLE); + gpdb.copyData(NUMERIC_TABLE, writableExTable); prepareReadableExternalTable("pxf_parquet_read_numeric", PARQUET_TABLE_DECIMAL_COLUMNS, hdfsPath + fileName); @@ -257,7 +265,7 @@ public void parquetWriteUndefinedPrecisionNumericWithScaleOverflow() throws Exce String writableExternalTableName = "parquet_write_undefined_precision_numeric_large_scale"; prepareNumericWritableExtTable(filePathName, fileName, writableExternalTableName, false, false); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT * FROM " + NUMERIC_UNDEFINED_PRECISION_TABLE); + gpdb.copyData(NUMERIC_UNDEFINED_PRECISION_TABLE, writableExTable); prepareReadableExternalTable("pxf_parquet_read_undefined_precision_numeric_large_scale", UNDEFINED_PRECISION_NUMERIC, hdfsPath + fileName); @@ -271,8 +279,7 @@ public void parquetWriteLists() throws Exception { String fullTestPath = hdfsPath + 
"parquet_write_list"; prepareWritableExternalTable(writeTableName, PARQUET_LIST_TABLE_COLUMNS, fullTestPath, null); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT id, bool_arr, smallint_arr, int_arr, bigint_arr, real_arr, " + - "double_arr, text_arr, bytea_arr, char_arr, varchar_arr, numeric_arr, date_arr FROM " + PXF_PARQUET_LIST_TYPES); + gpdb.copyData(PXF_PARQUET_LIST_TYPES, writableExTable, PARQUET_ARRAY_COLUMN_NAMES); waitForAsyncWriteToSucceedOnHCFS("parquet_write_list"); @@ -290,7 +297,7 @@ public void parquetWriteTimestampLists() throws Exception { String fullTestPath = hdfsPath + "parquet_write_timestamp_list"; prepareWritableExternalTable(writeTableName, PARQUET_TIMESTAMP_LIST_TABLE_COLUMNS, fullTestPath, null); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT id, tm_arr FROM " + PXF_PARQUET_TIMESTAMP_LIST_TYPES); + gpdb.copyData(PXF_PARQUET_TIMESTAMP_LIST_TYPES, writableExTable, new String[]{"id", "tm_arr"}); waitForAsyncWriteToSucceedOnHCFS("parquet_write_timestamp_list"); @@ -313,7 +320,7 @@ public void parquetWriteListsReadWithHive() throws Exception { String fullTestPath = hdfsPath + "parquet_write_list_read_with_hive"; prepareWritableExternalTable(writeTableName, PARQUET_LIST_TABLE_COLUMNS, fullTestPath, null); - insertArrayDataWithoutNulls(writeTableName, 33); + insertArrayDataWithoutNulls(writableExTable, 33); // load the data into hive to check that PXF-written Parquet files can be read by other data String hiveExternalTableName = writeTableName + "_external"; @@ -394,13 +401,11 @@ public void parquetWriteListsUserProvidedSchemaFile_ValidSchema() throws Excepti } hdfs.copyFromLocal(resourcePath + "parquet_list.schema", absoluteSchemaPath); - exTable.setExternalDataSchema(schemaPath); - // update the exTable with schema file provided - gpdb.createTableAndVerify(exTable); - - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT id, bool_arr, smallint_arr, int_arr, bigint_arr, real_arr, " + - "double_arr, text_arr, 
bytea_arr, char_arr, varchar_arr, numeric_arr, date_arr FROM " + PXF_PARQUET_LIST_TYPES); + writableExTable.setExternalDataSchema(schemaPath); + // update the writableExTable with schema file provided + gpdb.createTableAndVerify(writableExTable); + gpdb.copyData(PXF_PARQUET_LIST_TYPES, writableExTable, PARQUET_ARRAY_COLUMN_NAMES); waitForAsyncWriteToSucceedOnHCFS("parquet_write_lists_with_user_provided_schema_file_on_hcfs"); prepareReadableExternalTable(readTableName, PARQUET_LIST_TABLE_COLUMNS, fullTestPath); @@ -425,9 +430,9 @@ public void parquetWriteListsUserProvidedSchemaFile_InvalidSchema() throws Excep } hdfs.copyFromLocal(resourcePath + "invalid_parquet_list.schema", absoluteSchemaPath); - exTable.setExternalDataSchema(schemaPath); - // update the exTable with schema file provided - gpdb.createTableAndVerify(exTable); + writableExTable.setExternalDataSchema(schemaPath); + // update the writableExTable with schema file provided + gpdb.createTableAndVerify(writableExTable); runTincTest("pxf.features.parquet.write_list.write_with_invalid_schema_hcfs.runTest"); } @@ -436,9 +441,7 @@ private void runWritePrimitivesScenario(String writeTableName, String readTableN String filename, String[] userParameters) throws Exception { prepareWritableExternalTable(writeTableName, PARQUET_PRIMITIVE_TABLE_COLUMNS, hdfsPath + filename, userParameters); - gpdb.runQuery("INSERT INTO " + exTable.getName() + " SELECT s1, s2, n1, d1, dc1, tm, " + - "f, bg, b, tn, vc1, sml, c1, bin FROM " + PXF_PARQUET_PRIMITIVE_TABLE); - + gpdb.copyData(PXF_PARQUET_PRIMITIVE_TABLE, writableExTable, PARQUET_PRIMITIVE_COLUMN_NAMES); waitForAsyncWriteToSucceedOnHCFS(filename); prepareReadableExternalTable(readTableName, @@ -451,16 +454,16 @@ private void runWritePrimitivesScenario(String writeTableName, String readTableN } private void prepareReadableExternalTable(String name, String[] fields, String path) throws Exception { - exTable = TableFactory.getPxfHcfsReadableTable(name, fields, path, 
hdfs.getBasePath(), "parquet"); - createTable(exTable); + readableExTable = TableFactory.getPxfHcfsReadableTable(name, fields, path, hdfs.getBasePath(), "parquet"); + createTable(readableExTable); } private void prepareWritableExternalTable(String name, String[] fields, String path, String[] userParameters) throws Exception { - exTable = TableFactory.getPxfHcfsWritableTable(name, fields, path, hdfs.getBasePath(), "parquet"); + writableExTable = TableFactory.getPxfHcfsWritableTable(name, fields, path, hdfs.getBasePath(), "parquet"); if (userParameters != null) { - exTable.setUserParameters(userParameters); + writableExTable.setUserParameters(userParameters); } - createTable(exTable); + createTable(writableExTable); } private void waitForAsyncWriteToSucceedOnHCFS(String filename) throws Exception { @@ -478,9 +481,8 @@ private void waitForAsyncWriteToSucceedOnHCFS(String filename) throws Exception } } - private void insertArrayDataWithoutNulls(String exTable, int numRows) throws Exception { - StringBuilder insertStatement = new StringBuilder(); - insertStatement.append("INSERT INTO " + exTable + " VALUES "); + private void insertArrayDataWithoutNulls(Table exTable, int numRows) throws Exception { + StringBuilder values = new StringBuilder(); for (int i = 0; i < numRows; i++) { StringJoiner statementBuilder = new StringJoiner(",", "(", ")") .add(String.valueOf(i)) // always not-null row index, column index starts with 0 after it @@ -497,9 +499,9 @@ private void insertArrayDataWithoutNulls(String exTable, int numRows) throws Exc .add(String.format("'{12345678900000.00000%s}'", i)) // DataType.NUMERICARRAY .add(String.format("'{\"2010-01-%02d\"}'", (i % 30) + 1)) // DataType.DATEARRAY ; - insertStatement.append(statementBuilder.toString().concat((i < (numRows - 1)) ? "," : ";")); + values.append(statementBuilder.toString().concat((i < (numRows - 1)) ? 
"," : "")); } - gpdb.runQuery(insertStatement.toString()); + gpdb.insertData(values.toString(), exTable); } private void assertHiveByteaArrayData(List> queryResultData) { @@ -589,11 +591,11 @@ private void prepareNumericWritableExtTable(String filePathName, String fileName prepareWritableExternalTable(writableExternalTableName, numericTableColumns, hdfsPath + fileName, null); - exTable.setHost(pxfHost); - exTable.setPort(pxfPort); - exTable.setFormatter("pxfwritable_export"); - exTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); + writableExTable.setHost(pxfHost); + writableExTable.setPort(pxfPort); + writableExTable.setFormatter("pxfwritable_export"); + writableExTable.setProfile(ProtocolUtils.getProtocol().value() + ":parquet"); - gpdb.createTableAndVerify(exTable); + gpdb.createTableAndVerify(writableExTable); } } \ No newline at end of file diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableTextTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableTextTest.java index ec7b372c9a..92276f2c73 100755 --- a/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableTextTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableTextTest.java @@ -1,5 +1,7 @@ package org.greenplum.pxf.automation.features.writable; +import annotations.SkipForFDW; +import annotations.WorksWithFDW; import org.apache.commons.lang.StringUtils; import org.greenplum.pxf.automation.datapreparer.writable.WritableDataPreparer; import org.greenplum.pxf.automation.enums.EnumCompressionTypes; @@ -9,6 +11,7 @@ import org.greenplum.pxf.automation.structures.tables.pxf.WritableExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; import org.greenplum.pxf.automation.utils.fileformats.FileFormatsUtils; +import org.greenplum.pxf.automation.utils.system.FDWUtils; import 
org.greenplum.pxf.automation.utils.system.ProtocolEnum; import org.greenplum.pxf.automation.utils.system.ProtocolUtils; import org.greenplum.pxf.automation.utils.tables.ComparisonUtils; @@ -26,6 +29,7 @@ /** * Testing cases for PXF Writable feature for Text formats (Text, CSV) and compressions. */ +@WorksWithFDW public class HdfsWritableTextTest extends BaseWritableFeature { private static final String COMPRESSION_CODEC = "org.apache.hadoop.io.compress.DefaultCodec"; @@ -73,7 +77,13 @@ public void textFormatInsertNoProfile() throws Exception { String hdfsPath = hdfsWritePath + "/text_format_no_profile"; writableExTable = prepareWritableTable("pxf_text_format_no_profile", hdfsPath, null); - writableExTable.setProfile(null); + if (FDWUtils.useFDW) { + // FDW does not really allow empty format, so we use special "test:text" profile that gets ignored + writableExTable.setFormat("text"); + writableExTable.setProfile("test:text"); + } else { + writableExTable.setProfile(null); + } writableExTable.setFragmenter("org.greenplum.pxf.plugins.hdfs.HdfsDataFragmenter"); writableExTable.setAccessor("org.greenplum.pxf.plugins.hdfs.LineBreakAccessor"); writableExTable.setResolver("org.greenplum.pxf.plugins.hdfs.StringPassResolver"); @@ -288,12 +298,7 @@ public void textFormatGZipInsert() throws Exception { public void textFormatGZipInsertShortname() throws Exception { String hdfsPath = hdfsWritePath + "/gzip_shortname_format_using_insert"; - writableExTable = new WritableExternalTable("pxf_gzip_shortname_format_using_insert", gpdbTableFields, - protocol.getExternalTablePath(hdfs.getBasePath(), hdfsPath), "Text"); - writableExTable.setProfile(ProtocolUtils.getProtocol().value() + ":text"); - writableExTable.setDelimiter(","); - writableExTable.setCompressionCodec("gzip"); - createTable(writableExTable); + writableExTable = prepareWritableGzipTable("pxf_gzip_shortname_format_using_insert", hdfsPath, "gzip"); insertData(dataTable, writableExTable, InsertionMethod.INSERT); 
verifyResult(hdfsPath, dataTable, EnumCompressionTypes.GZip); } @@ -341,7 +346,6 @@ public void textFormatBZip2Insert() throws Exception { String hdfsPath = hdfsWritePath + "/bzip2_format_using_insert"; writableExTable = prepareWritableBZip2Table("pxf_bzip2_format_using_insert", hdfsPath); - insertData(dataTable, writableExTable, InsertionMethod.INSERT); verifyResult(hdfsPath, dataTable, EnumCompressionTypes.BZip2); } @@ -356,7 +360,6 @@ public void textFormatBZip2CopyFromStdin() throws Exception { String hdfsPath = hdfsWritePath + "/copy_from_stdin_bzip2"; writableExTable = prepareWritableBZip2Table("pxf_copy_from_stdin_bzip2", hdfsPath); - insertData(dataTable, writableExTable, InsertionMethod.COPY); verifyResult(hdfsPath, dataTable, EnumCompressionTypes.BZip2); } @@ -370,12 +373,7 @@ public void textFormatBZip2CopyFromStdin() throws Exception { public void textFormatBZip2CopyFromStdinShortname() throws Exception { String hdfsPath = hdfsWritePath + "/copy_from_stdin_bzip2_shortname"; - writableExTable = new WritableExternalTable("pxf_copy_from_stdin_bzip2_shortname", gpdbTableFields, - protocol.getExternalTablePath(hdfs.getBasePath(), hdfsPath), "Text"); - writableExTable.setProfile(ProtocolUtils.getProtocol().value() + ":text"); - writableExTable.setDelimiter(","); - writableExTable.setCompressionCodec("bzip2"); - createTable(writableExTable); + writableExTable = prepareWritableBZip2Table("pxf_copy_from_stdin_bzip2_shortname", hdfsPath, "bzip2"); insertData(dataTable, writableExTable, InsertionMethod.COPY); verifyResult(hdfsPath, dataTable, EnumCompressionTypes.BZip2); } @@ -541,7 +539,9 @@ public void copyFromFileMultiBlockedDataBZip2() throws Exception { * The test creates a writable external table, copies the data into it, then * uses a readable external table to compare the data with the original. 
*/ - @Test(groups = {"features"}) + // FDW does not yet support DISTRIBUTED BY on foreign tables, we can't guarantee only 1 segment will write data + @SkipForFDW + @Test(groups = {"features", "gpdb", "hcfs", "security"}) public void veryLongRecords() throws Exception { final String[][] data = new String[][]{ @@ -558,25 +558,24 @@ public void veryLongRecords() throws Exception { dataTable.addRows(data); String hdfsPath = hdfsWritePath + writableTableName + "_verylongrecord"; - writableExTable.setFields(fields); - writableExTable.setName("verylongrecordexport"); - writableExTable.setPath(hdfsPath); + writableExTable = TableFactory.getPxfWritableTextTable("pxf_text_very_long_records_w", fields, + protocol.getExternalTablePath(hdfs.getBasePath(), hdfsPath), ","); writableExTable.setFormat("CSV"); writableExTable.setDistributionFields(new String[]{"key"}); gpdb.createTableAndVerify(writableExTable); + insertData(dataTable, writableExTable, InsertionMethod.INSERT); - gpdb.insertData(dataTable, writableExTable); - Assert.assertEquals("More than one segment wrote to " + hdfsPath, - 1, hdfs.list(hdfsPath).size()); + if (protocol != ProtocolEnum.HDFS) { + // for HCFS on Cloud, wait a bit for async write in previous steps to finish + sleep(10000); + } + Assert.assertEquals("More than one segment wrote to " + hdfsPath,1, hdfs.list(hdfsPath).size()); - readableExTable.setFields(fields); - readableExTable.setPath(hdfsPath); - readableExTable.setName("verylongrecordimport"); - readableExTable.setFormat("csv"); + readableExTable = TableFactory.getPxfReadableCSVTable("pxf_text_very_long_records_r", fields, + protocol.getExternalTablePath(hdfs.getBasePath(), hdfsPath), ","); gpdb.createTableAndVerify(readableExTable); - gpdb.queryResults(readableExTable, - "SELECT * FROM " + readableExTable.getName() + " ORDER BY linenum"); + gpdb.queryResults(readableExTable,"SELECT * FROM " + readableExTable.getName() + " ORDER BY linenum"); ComparisonUtils.compareTables(readableExTable, 
dataTable, null); } @@ -594,7 +593,7 @@ private void verifyResult(String hdfsPath, Table data, EnumCompressionTypes comp String localResultFile = dataTempFolder + "/" + hdfsPath.replaceAll("/", "_"); // for HCFS on Cloud, wait a bit for async write in previous steps to finish - if (protocol != ProtocolEnum.HDFS && protocol != ProtocolEnum.FILE) { + if (protocol != ProtocolEnum.HDFS) { sleep(10000); } List files = hdfs.list(hdfsPath); @@ -602,11 +601,6 @@ private void verifyResult(String hdfsPath, Table data, EnumCompressionTypes comp int index = 0; for (String file : files) { String pathToLocalFile = localResultFile + "/_" + index; - // make sure the file is available, saw flakes on Cloud that listed files were not available - int attempts = 0; - while (!hdfs.doesFileExist(file) && attempts++ < 20) { - sleep(1000); - } hdfs.copyToLocal(file, pathToLocalFile); sleep(250); resultTable.loadDataFromFile(pathToLocalFile, ",", 1, "UTF-8", @@ -648,7 +642,7 @@ private void insertData(Table data, WritableExternalTable table, InsertionMethod gpdb.copyFromStdin(data, table, ",", false); break; case INSERT_FROM_TABLE: - gpdb.runQuery("INSERT INTO " + table.getName() + " SELECT * FROM " + data.getName()); + gpdb.copyData(data, table); break; } } @@ -685,15 +679,28 @@ private WritableExternalTable prepareWritableTable(String name, String path, Str } private WritableExternalTable prepareWritableBZip2Table(String name, String path) throws Exception { + return prepareWritableBZip2Table(name, path, null); + } + + private WritableExternalTable prepareWritableBZip2Table(String name, String path, String customCodecName) throws Exception { WritableExternalTable table = TableFactory.getPxfWritableBZip2Table(name, gpdbTableFields, protocol.getExternalTablePath(hdfs.getBasePath(), path), ","); + if (customCodecName != null) { + table.setCompressionCodec(customCodecName); + } createTable(table); return table; } private WritableExternalTable prepareWritableGzipTable(String name, String path) 
throws Exception { + return prepareWritableGzipTable(name, path, null); + } + private WritableExternalTable prepareWritableGzipTable(String name, String path, String customCodecName) throws Exception { WritableExternalTable table = TableFactory.getPxfWritableGzipTable(name, gpdbTableFields, protocol.getExternalTablePath(hdfs.getBasePath(), path), ","); + if (customCodecName != null) { + table.setCompressionCodec(customCodecName); + } createTable(table); return table; } diff --git a/automation/src/test/java/org/greenplum/pxf/automation/smoke/WritableSmokeTest.java b/automation/src/test/java/org/greenplum/pxf/automation/smoke/WritableSmokeTest.java index 7cd6728709..dac2b0725b 100755 --- a/automation/src/test/java/org/greenplum/pxf/automation/smoke/WritableSmokeTest.java +++ b/automation/src/test/java/org/greenplum/pxf/automation/smoke/WritableSmokeTest.java @@ -1,16 +1,25 @@ package org.greenplum.pxf.automation.smoke; -import java.io.File; - +import annotations.WorksWithFDW; import org.greenplum.pxf.automation.structures.tables.basic.Table; import org.greenplum.pxf.automation.structures.tables.pxf.WritableExternalTable; import org.greenplum.pxf.automation.structures.tables.utils.TableFactory; import org.greenplum.pxf.automation.utils.files.FileUtils; import org.testng.annotations.Test; +import java.io.File; + /** Write data to HDFS using Writable External table. Read it using PXF. 
*/ +@WorksWithFDW public class WritableSmokeTest extends BaseSmoke { WritableExternalTable writableExTable; + private final static String[] FIELDS = new String[]{ + "name text", + "num integer", + "dub double precision", + "longNum bigint", + "bool boolean" + }; @Override protected void prepareData() throws Exception { @@ -23,30 +32,18 @@ protected void prepareData() throws Exception { @Override protected void createTables() throws Exception { // Create Writable external table - writableExTable = new WritableExternalTable("hdfs_writable_table", new String[] { - "name text", - "num integer", - "dub double precision", - "longNum bigint", - "bool boolean" - }, hdfs.getWorkingDirectory() + "/bzip", "Text"); + writableExTable = TableFactory.getPxfWritableTextTable("hdfs_writable_table", FIELDS, + hdfs.getWorkingDirectory() + "/bzip", "|"); - writableExTable.setAccessor("org.greenplum.pxf.plugins.hdfs.LineBreakAccessor"); - writableExTable.setResolver("org.greenplum.pxf.plugins.hdfs.StringPassResolver"); writableExTable.setCompressionCodec("org.apache.hadoop.io.compress.BZip2Codec"); - writableExTable.setDelimiter("|"); writableExTable.setHost(pxfHost); writableExTable.setPort(pxfPort); gpdb.createTableAndVerify(writableExTable); gpdb.copyFromFile(writableExTable, new File(dataTempFolder + "/" + fileName), "|", false); + // Create Readable External Table - exTable = TableFactory.getPxfReadableTextTable("pxf_smoke_small_data", new String[] { - "name text", - "num integer", - "dub double precision", - "longNum bigint", - "bool boolean" - }, hdfs.getWorkingDirectory() + "/bzip", "|"); + exTable = TableFactory.getPxfReadableTextTable("pxf_smoke_small_data", FIELDS, + hdfs.getWorkingDirectory() + "/bzip", "|"); exTable.setHost(pxfHost); exTable.setPort(pxfPort); gpdb.createTableAndVerify(exTable); diff --git a/automation/tinc/main/tinctest/lib/global_init_file b/automation/tinc/main/tinctest/lib/global_init_file index dd5f73311b..d193dd5338 100755 --- 
a/automation/tinc/main/tinctest/lib/global_init_file +++ b/automation/tinc/main/tinctest/lib/global_init_file @@ -67,6 +67,10 @@ s/FOREIGN TABLE/EXTERNAL TABLE/ m/, record \d+ of/ s/, record/, line/ +# rename resource (FDW) to file (exttable) +m/, resource/ +s/, resource/, file/ + # syntax mismatch error is different between exttable with GP6 and fdw with GP7 m/invalid input syntax for type/ s/invalid input syntax for type/invalid input syntax for/ diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/expected/query01.ans index 389e183787..f0f8fcc390 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/expected/query01.ans @@ -18,6 +18,12 @@ -- m/pxf:\/\/(.*)\/pxf_automation_data/ -- s/pxf:\/\/.*PROFILE=s3:text&.*/pxf:\/\/pxf_automation_data?PROFILE=s3:text&ACCESS_AND_SECRET_KEY/ -- +-- m/default_s3/ +-- s/default_s3/default/ +-- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_credentials_no_config_with_hdfs; ERROR: PXF server error : profile 's3a' is not compatible with server's 'default' configuration ('hdfs') diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/sql/query01.sql index 1aba89b577..ffa82957c9 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_credentials_no_config_with_hdfs/sql/query01.sql @@ -18,6 +18,12 @@ -- 
m/pxf:\/\/(.*)\/pxf_automation_data/ -- s/pxf:\/\/.*PROFILE=s3:text&.*/pxf:\/\/pxf_automation_data?PROFILE=s3:text&ACCESS_AND_SECRET_KEY/ -- +-- m/default_s3/ +-- s/default_s3/default/ +-- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_credentials_no_config_with_hdfs; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/expected/query01.ans index 3f1eff778e..561c662366 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/expected/query01.ans @@ -12,16 +12,16 @@ -- m/DETAIL/ -- s/DETAIL/CONTEXT/ -- --- m/pxf:\/\/(.*)\/pxf_automation_data/ --- s/pxf:\/\/.*PROFILE/pxf:\/\/pxf_automation_data?PROFILE/ --- -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_no_credentials; ERROR: PXF server error : com.amazonaws.AmazonClientException: No AWS Credentials provided by BasicAWSCredentialsProvider -- start_ignore HINT: Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. 
-- end_ignore -DETAIL: External table cloudaccess_no_server_no_credentials, file pxf://pxf_automation_data?PROFILE=s3:text +DETAIL: External table cloudaccess_no_server_no_credentials, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/sql/query01.sql index b720de7dd7..039c85b519 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials/sql/query01.sql @@ -12,12 +12,12 @@ -- m/DETAIL/ -- s/DETAIL/CONTEXT/ -- --- m/pxf:\/\/(.*)\/pxf_automation_data/ --- s/pxf:\/\/.*PROFILE/pxf:\/\/pxf_automation_data?PROFILE/ --- -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_no_credentials; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/expected/query01.ans index 27a1b220f3..60420c3240 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/expected/query01.ans @@ -18,10 +18,16 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/default_s3/ +-- s/default_s3/default/ +-- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_no_credentials_with_hdfs; ERROR: PXF server error : profile 's3a' is not compatible with server's 'default' configuration ('hdfs') -- start_ignore HINT: Ensure that 'server-dir' includes only the configuration files for profile 
's3a'. Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. -- end_ignore -DETAIL: External table cloudaccess_no_server_no_credentials_with_hdfs, file pxf://pxf_automation_data?PROFILE=s3:text +DETAIL: External table cloudaccess_no_server_no_credentials_with_hdfs, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/sql/query01.sql index 84ae63bfca..0934723946 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/no_server_no_credentials_with_hdfs/sql/query01.sql @@ -18,6 +18,12 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/default_s3/ +-- s/default_s3/default/ +-- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_no_server_no_credentials_with_hdfs; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/expected/query01.ans index 489828a064..4d5dcfb2b3 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/expected/query01.ans @@ -18,10 +18,13 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_invalid_config; ERROR: PXF server error : com.amazonaws.services.s3.model.AmazonS3Exception: Forbidden -- start_ignore HINT: Check the PXF logs 
located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. -- end_ignore -DETAIL: External table cloudaccess_server_no_credentials_invalid_config, file pxf://pxf_automation_data?PROFILE=s3:text&server=s3-invalid +DETAIL: External table cloudaccess_server_no_credentials_invalid_config, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/sql/query01.sql index d0980b7fea..1ff81a6793 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config/sql/query01.sql @@ -18,6 +18,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_invalid_config; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/expected/query01.ans index 5b680248fb..92428d9a06 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/expected/query01.ans @@ -18,10 +18,13 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_invalid_config_with_hdfs; ERROR: PXF server error : com.amazonaws.services.s3.model.AmazonS3Exception: Forbidden -- start_ignore HINT: Check the PXF 
logs located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. -- end_ignore -DETAIL: External table cloudaccess_server_no_credentials_invalid_config_with_hdfs, file pxf://pxf_automation_data?PROFILE=s3:text&server=s3-invalid +DETAIL: External table cloudaccess_server_no_credentials_invalid_config_with_hdfs, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/sql/query01.sql index 17fa7b76ab..f28f4e1838 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_invalid_config_with_hdfs/sql/query01.sql @@ -18,6 +18,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_invalid_config_with_hdfs; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/expected/query01.ans index 13f8472d68..2c3c2ac1b6 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/expected/query01.ans @@ -18,10 +18,13 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_no_config; ERROR: PXF server error : com.amazonaws.AmazonClientException: No AWS Credentials provided by BasicAWSCredentialsProvider -- 
start_ignore HINT: Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. -- end_ignore -DETAIL: External table cloudaccess_server_no_credentials_no_config, file pxf://pxf_automation_data?PROFILE=s3:text&server=s3-non-existent +DETAIL: External table cloudaccess_server_no_credentials_no_config, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/sql/query01.sql index 7ee6ffa84e..fe31a10e67 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config/sql/query01.sql @@ -18,6 +18,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_no_config; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/expected/query01.ans index 99f16b75ea..0af50070b0 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/expected/query01.ans @@ -21,10 +21,13 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_no_config_with_hdfs; ERROR: PXF server error : com.amazonaws.AmazonClientException: No AWS Credentials provided by BasicAWSCredentialsProvider -- start_ignore HINT: 
Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 'set client_min_messages=LOG' for additional details. -- end_ignore -DETAIL: External table cloudaccess_server_no_credentials_no_config_with_hdfs, file pxf://pxf_automation_data?PROFILE=s3:text&server=s3-non-existent +DETAIL: External table cloudaccess_server_no_credentials_no_config_with_hdfs, file pxf_automation_data diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/sql/query01.sql index c2d2a92b7a..9d09b48626 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_no_config_with_hdfs/sql/query01.sql @@ -21,6 +21,9 @@ -- m/CONTEXT:.*line.*/ -- s/line \d* of //g -- +-- m/, file.*pxf_automation_data/ +-- s/, file.*pxf_automation_data.*/pxf_automation_data/ +-- -- end_matchsubs SELECT * FROM cloudaccess_server_no_credentials_no_config_with_hdfs; diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/expected/query01.ans b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/expected/query01.ans index 44f4bfce41..1694a0a6c1 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/expected/query01.ans @@ -2,7 +2,14 @@ -- end_ignore -- @description query01 for PXF test for cloud write where server is specified, no credentials are specified, and configuration file exists running alongside an HDFS setup -- - +-- start_matchsubs +-- +-- # create a match/subs +-- +-- m/WARNING.*cannot analyze this foreign 
table/ +-- s/.*// +-- +-- end_matchsubs INSERT INTO cloudwrite_server_no_credentials_valid_config_with_hdfs_write SELECT md5(random()::text), round(random()*100) from generate_series(1,10); INSERT 0 10 diff --git a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/sql/query01.sql b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/sql/query01.sql index 31c7b1c111..8a734efdbf 100755 --- a/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/cloud_access/server_no_credentials_valid_config_with_hdfs_write/sql/query01.sql @@ -2,7 +2,14 @@ -- end_ignore -- @description query01 for PXF test for cloud write where server is specified, no credentials are specified, and configuration file exists running alongside an HDFS setup -- - +-- start_matchsubs +-- +-- # create a match/subs +-- +-- m/WARNING.*cannot analyze this foreign table/ +-- s/.*// +-- +-- end_matchsubs INSERT INTO cloudwrite_server_no_credentials_valid_config_with_hdfs_write SELECT md5(random()::text), round(random()*100) from generate_series(1,10); SELECT count(*) FROM cloudaccess_server_no_credentials_valid_config_with_hdfs_write; diff --git a/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/expected/query01.ans b/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/expected/query01.ans index 4ef64b6c1f..2ce2248dfd 100755 --- a/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/expected/query01.ans +++ b/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/expected/query01.ans @@ -13,6 +13,9 @@ -- m/(E|e)xception (r|R)eport +(m|M)essage/ -- s/(E|e)xception (r|R)eport +(m|M)essage/exception report message/ -- +-- m/, file .*/ +-- s/, file .*// +-- -- end_matchsubs SELECT * from 
avro_in_seq_no_schema; ERROR: PXF server error : Failed to obtain Avro schema from 'i_do_not_exist' diff --git a/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/sql/query01.sql b/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/sql/query01.sql index 74e9af38fa..0f3269a3ed 100755 --- a/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/sql/query01.sql +++ b/automation/tincrepo/main/pxf/features/hdfs/readable/avro/errors/no_schema_file/sql/query01.sql @@ -13,5 +13,8 @@ -- m/(E|e)xception (r|R)eport +(m|M)essage/ -- s/(E|e)xception (r|R)eport +(m|M)essage/exception report message/ -- +-- m/, file .*/ +-- s/, file .*// +-- -- end_matchsubs SELECT * from avro_in_seq_no_schema; diff --git a/fdw/pxf_fdw.c b/fdw/pxf_fdw.c index 52dca360c4..a4a1cc107a 100644 --- a/fdw/pxf_fdw.c +++ b/fdw/pxf_fdw.c @@ -82,9 +82,14 @@ static void pxfReScanForeignScan(ForeignScanState *node); static void pxfEndForeignScan(ForeignScanState *node); /* Foreign updates */ +static void pxfBeginForeignInsert(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo); + static void pxfBeginForeignModify(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, List *fdw_private, int subplan_index, int eflags); + static TupleTableSlot *pxfExecForeignInsert(EState *estate, ResultRelInfo *resultRelInfo, TupleTableSlot *slot, TupleTableSlot *planSlot); +static void pxfEndForeignInsert(EState *estate, ResultRelInfo *resultRelInfo); + static void pxfEndForeignModify(EState *estate, ResultRelInfo *resultRelInfo); static int pxfIsForeignRelUpdatable(Relation rel); @@ -92,6 +97,8 @@ static int pxfIsForeignRelUpdatable(Relation rel); /* * Helper functions */ +static PxfFdwModifyState *InitForeignModify(Relation relation); +static void FinishForeignModify(PxfFdwModifyState *pxfmstate); static void InitCopyState(PxfFdwScanState *pxfsstate); static void InitCopyStateForModify(PxfFdwModifyState *pxfmstate); static CopyState 
BeginCopyTo(Relation forrel, List *options); @@ -139,6 +146,9 @@ pxf_fdw_handler(PG_FUNCTION_ARGS) * taken */ fdw_routine->PlanForeignModify = NULL; +#if PG_VERSION_NUM >= 120000 + fdw_routine->BeginForeignInsert = pxfBeginForeignInsert; +#endif fdw_routine->BeginForeignModify = pxfBeginForeignModify; fdw_routine->ExecForeignInsert = pxfExecForeignInsert; @@ -148,6 +158,9 @@ pxf_fdw_handler(PG_FUNCTION_ARGS) */ fdw_routine->ExecForeignUpdate = NULL; fdw_routine->ExecForeignDelete = NULL; +#if PG_VERSION_NUM >= 120000 + fdw_routine->EndForeignInsert = pxfEndForeignInsert; +#endif fdw_routine->EndForeignModify = pxfEndForeignModify; fdw_routine->IsForeignRelUpdatable = pxfIsForeignRelUpdatable; @@ -427,7 +440,7 @@ pxfBeginForeignScan(ForeignScanState *node, int eflags) /* Set up callback to identify error foreign relation. */ ErrorContextCallback errcallback; errcallback.callback = PxfBeginScanErrorCallback; - errcallback.arg = (void *) relation; + errcallback.arg = (void *) pxfsstate; errcallback.previous = error_context_stack; error_context_stack = &errcallback; @@ -558,6 +571,24 @@ pxfEndForeignScan(ForeignScanState *node) elog(DEBUG5, "pxf_fdw: pxfEndForeignScan ends on segment: %d", PXF_SEGMENT_ID); } +/* + * pxfBeginForeignInsert + * Begin an insert operation on a foreign table, called in COPY FROM flow + */ +static void +pxfBeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + /* + * This would be the natural place to call external_insert_init(), but we + * delay that until the first actual insert. That's because we don't want + * to open the external resource if we don't end up actually inserting any + * rows in this segment. In particular, we don't want to initialize the + * external resource in the QD node, when all the actual insertions happen + * in the segments. 
+ */ +} + /* * pxfBeginForeignModify * Begin an insert/update/delete operation on a foreign table @@ -569,24 +600,42 @@ pxfBeginForeignModify(ModifyTableState *mtstate, int subplan_index, int eflags) { - elog(DEBUG5, "pxf_fdw: pxfBeginForeignModify starts on segment: %d", PXF_SEGMENT_ID); + /* + * This would be the natural place to call external_insert_init(), but we + * delay that until the first actual insert. That's because we don't want + * to open the external resource if we don't end up actually inserting any + * rows in this segment. In particular, we don't want to initialize the + * external resource in the QD node, when all the actual insertions happen + * in the segments. + */ +} + +/* + * InitForeignModify + * Initialize various structures before actually performing insertion / modification + * of data in an external system + */ +static PxfFdwModifyState * +InitForeignModify(Relation relation) +{ + elog(DEBUG5, "pxf_fdw: InitForeignModify starts on segment: %d", PXF_SEGMENT_ID); ForeignTable *rel; Oid foreigntableid; PxfOptions *options = NULL; PxfFdwModifyState *pxfmstate = NULL; - Relation relation = resultRelInfo->ri_RelationDesc; TupleDesc tupDesc; - if (eflags & EXEC_FLAG_EXPLAIN_ONLY) - return; + // TODO: do we need to care about this ? 
+// if (eflags & EXEC_FLAG_EXPLAIN_ONLY) +// return; foreigntableid = RelationGetRelid(relation); rel = GetForeignTable(foreigntableid); if (Gp_role == GP_ROLE_DISPATCH && rel->exec_location == FTEXECLOCATION_ALL_SEGMENTS) /* master does not process any data when exec_location is all segments */ - return; + return NULL; tupDesc = RelationGetDescr(relation); options = PxfGetOptions(foreigntableid); @@ -602,9 +651,8 @@ pxfBeginForeignModify(ModifyTableState *mtstate, InitCopyStateForModify(pxfmstate); - resultRelInfo->ri_FdwState = pxfmstate; - elog(DEBUG5, "pxf_fdw: pxfBeginForeignModify ends on segment: %d", PXF_SEGMENT_ID); + return pxfmstate; } /* @@ -620,10 +668,15 @@ pxfExecForeignInsert(EState *estate, elog(DEBUG5, "pxf_fdw: pxfExecForeignInsert starts on segment: %d", PXF_SEGMENT_ID); PxfFdwModifyState *pxfmstate = (PxfFdwModifyState *) resultRelInfo->ri_FdwState; - - /* If pxfmstate is NULL, we are in MASTER when exec_location is all segments; nothing to do */ - if (pxfmstate == NULL) - return NULL; + if (!pxfmstate) + { + /* state has not been initialized yet, create and store it on the first call */ + pxfmstate = InitForeignModify(resultRelInfo->ri_RelationDesc); + /* if initialization was a noop (ANALYZE case or execution on COORDINATOR, exit */ + if (!pxfmstate) + return slot; + resultRelInfo->ri_FdwState = pxfmstate; + } CopyState cstate = pxfmstate->cstate; #if PG_VERSION_NUM < 90600 @@ -664,6 +717,21 @@ pxfExecForeignInsert(EState *estate, return slot; } +/* + * pxfEndForeignInsert + * Finish an insert operation on a foreign table + */ +static void +pxfEndForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo) +{ + elog(DEBUG5, "pxf_fdw: pxfEndForeignInsert starts on segment: %d", PXF_SEGMENT_ID); + + FinishForeignModify(resultRelInfo->ri_FdwState); + + elog(DEBUG5, "pxf_fdw: pxfEndForeignInsert ends on segment: %d", PXF_SEGMENT_ID); +} + /* * pxfEndForeignModify * Finish an insert/update/delete operation on a foreign table @@ -674,8 +742,14 @@ 
pxfEndForeignModify(EState *estate, { elog(DEBUG5, "pxf_fdw: pxfEndForeignModify starts on segment: %d", PXF_SEGMENT_ID); - PxfFdwModifyState *pxfmstate = (PxfFdwModifyState *) resultRelInfo->ri_FdwState; + FinishForeignModify(resultRelInfo->ri_FdwState); + + elog(DEBUG5, "pxf_fdw: pxfEndForeignModify ends on segment: %d", PXF_SEGMENT_ID); +} +static void +FinishForeignModify(PxfFdwModifyState *pxfmstate) +{ /* If pxfmstate is NULL, we are in EXPLAIN or MASTER when exec_location is all segments; nothing to do */ if (pxfmstate == NULL) return; @@ -684,7 +758,6 @@ pxfEndForeignModify(EState *estate, pxfmstate->cstate = NULL; PxfBridgeCleanup(pxfmstate); - elog(DEBUG5, "pxf_fdw: pxfEndForeignModify ends on segment: %d", PXF_SEGMENT_ID); } /* @@ -793,8 +866,7 @@ InitCopyStateForModify(PxfFdwModifyState *pxfmstate) PxfBridgeExportStart(pxfmstate); /* - * Create CopyState from FDW options. We always acquire all columns, so - * as to match the expected ScanTupleSlot signature. + * Create CopyState from FDW options. We always acquire all columns to match the expected ScanTupleSlot signature. */ cstate = BeginCopyTo(pxfmstate->relation, copy_options); @@ -881,15 +953,23 @@ BeginCopyTo(Relation forrel, List *options) /* * PXF specific error context callback for "begin foreign scan" operation. 
* It replaces the "COPY" term in the error message context with - * the "Foreign table" term and provides the name of the foreign table + * the "Foreign table" term and provides the name of the foreign table and its resource option */ static void PxfBeginScanErrorCallback(void *arg) { - Relation relation = (Relation) arg; - if (relation) { - errcontext("Foreign table %s", RelationGetRelationName(relation)); - return; - } + PxfFdwScanState *pxfsstate = (PxfFdwScanState *) arg; + if (pxfsstate && pxfsstate->relation) { + if (pxfsstate->options && pxfsstate->options->resource) + { + errcontext("Foreign table %s, resource %s", + RelationGetRelationName(pxfsstate->relation), pxfsstate->options->resource); + } + else + { + errcontext("Foreign table %s", RelationGetRelationName(pxfsstate->relation)); + } + return; + } } /* diff --git a/server/build.gradle b/server/build.gradle index 1da8c6aa7d..4259d91411 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -77,6 +77,7 @@ configure(javaProjects) { dependency("com.google.cloud.bigdataoss:gcs-connector:hadoop2-1.9.17") dependency("com.microsoft.azure:azure-storage:5.4.0") dependency("com.microsoft.azure:azure-data-lake-store-sdk:2.3.9") + dependency("com.univocity:univocity-parsers:2.9.1") dependency("com.yammer.metrics:metrics-core:2.2.0") dependency("com.zaxxer:HikariCP:3.4.5") dependency("commons-codec:commons-codec:1.14") diff --git a/server/pxf-api/src/main/java/org/greenplum/pxf/api/io/DataType.java b/server/pxf-api/src/main/java/org/greenplum/pxf/api/io/DataType.java index c0b9f5074c..d1054840d6 100644 --- a/server/pxf-api/src/main/java/org/greenplum/pxf/api/io/DataType.java +++ b/server/pxf-api/src/main/java/org/greenplum/pxf/api/io/DataType.java @@ -20,6 +20,8 @@ */ +import java.util.EnumSet; + /** * Supported Data Types and OIDs (GPDB Data Type identifiers). * There's a one-to-one match between a Data Type and it's corresponding OID. 
@@ -72,6 +74,10 @@ public enum DataType { private static final int[] NOT_TEXT = {BIGINT.OID, BOOLEAN.OID, BYTEA.OID, FLOAT8.OID, INTEGER.OID, REAL.OID, SMALLINT.OID}; + // Set of types that preserve the type information when their value is deserialized, + // this is similar to NOT_TEXT above, but used explicitly in the deserialization case of PXF Write Flow + private static EnumSet SELF_DESER_TYPES = EnumSet.of(BOOLEAN, SMALLINT, INTEGER, BIGINT, REAL, FLOAT8, BYTEA); + static { INT2ARRAY.typeElem = SMALLINT; INT4ARRAY.typeElem = INTEGER; @@ -191,4 +197,18 @@ public DataType getTypeArray() { public boolean getNeedsEscapingInArray() { return needsEscapingInArray; } + + /** + * Returns the type that deserialization logic needs to report for backward compatibility with GPDBWritable, + * where only boolean/short/int/long/float/double/bytea are represented by their actual types + * and the rest of data types are represented as TEXT by the deserialization logic. + * @return the corresponding DataType when deserializing a value of a given type + */ + public DataType getDeserializationType() { + if (SELF_DESER_TYPES.contains(this)) { + return this; // return itself as the type that should be reported + } else { + return DataType.TEXT; // everything else is reported as TEXT once deserialized + } + } } diff --git a/server/pxf-api/src/main/java/org/greenplum/pxf/api/model/InputStreamHandler.java b/server/pxf-api/src/main/java/org/greenplum/pxf/api/model/InputStreamHandler.java new file mode 100644 index 0000000000..a4a57ffdeb --- /dev/null +++ b/server/pxf-api/src/main/java/org/greenplum/pxf/api/model/InputStreamHandler.java @@ -0,0 +1,14 @@ +package org.greenplum.pxf.api.model; + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.TYPE; + +/** + * Annotation for marking Plugins as capable of handling InputStream with raw data. 
+ */ +@Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@Target({ TYPE }) +public @interface InputStreamHandler { +} diff --git a/server/pxf-api/src/test/java/org/greenplum/pxf/api/io/DataTypeTest.java b/server/pxf-api/src/test/java/org/greenplum/pxf/api/io/DataTypeTest.java index 91e111672b..6550f1651b 100644 --- a/server/pxf-api/src/test/java/org/greenplum/pxf/api/io/DataTypeTest.java +++ b/server/pxf-api/src/test/java/org/greenplum/pxf/api/io/DataTypeTest.java @@ -48,6 +48,25 @@ public void testNeedsEscapingInArray() { } } + @Test + public void testDeserializationTypes() { + for (DataType dataType : DataType.values()) { + switch (dataType) { + case BOOLEAN: + case SMALLINT: + case INTEGER: + case BIGINT: + case FLOAT8: + case REAL: + case BYTEA: + assertEquals(dataType, dataType.getDeserializationType()); + break; + default: + assertEquals(DataType.TEXT, dataType.getDeserializationType()); + } + } + } + private boolean needsEscapingForElementsInArray(int oid) { return oid == DataType.BYTEAARRAY.getOID() || oid == DataType.TEXTARRAY.getOID() || oid == DataType.BPCHARARRAY.getOID() || oid == DataType.VARCHARARRAY.getOID() || diff --git a/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/AvroResolver.java b/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/AvroResolver.java index 652ca72519..df4a40cfe3 100644 --- a/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/AvroResolver.java +++ b/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/AvroResolver.java @@ -162,11 +162,12 @@ public OneRow setFields(List record) { for (OneField field : record) { if (field.type == DataType.BYTEA.getOID()) { // Avro does not seem to understand regular byte arrays - field.val = field.val != null ? ByteBuffer.wrap((byte[]) field.val) : null; + field.val = (field.val == null) ? null : + (field.val instanceof ByteBuffer) ? 
field.val : ByteBuffer.wrap((byte[]) field.val); } else if (field.type == DataType.SMALLINT.getOID()) { // Avro doesn't have a short, just an int type field.val = field.val != null ? (int) (short) field.val : null; - } else if (field.type == DataType.TEXT.getOID()) { + } else if (field.type == DataType.TEXT.getOID() || DataType.get(field.type).isArrayType()) { // when field.type is TEXT, it might be an actual TEXT field or an array type field.val = avroUtilities.decodeString(schema.getFields().get(cnt).schema(), (String) field.val, true, hasUserProvidedSchema); } diff --git a/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/StringPassResolver.java b/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/StringPassResolver.java index 4915d1e027..0037ca1fb8 100644 --- a/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/StringPassResolver.java +++ b/server/pxf-hdfs/src/main/java/org/greenplum/pxf/plugins/hdfs/StringPassResolver.java @@ -23,6 +23,7 @@ import org.greenplum.pxf.api.OneField; import org.greenplum.pxf.api.OneRow; import org.greenplum.pxf.api.model.BasePlugin; +import org.greenplum.pxf.api.model.InputStreamHandler; import org.greenplum.pxf.api.model.Resolver; import java.io.InputStream; @@ -37,6 +38,7 @@ * String records. StringPassResolver implements {@link Resolver} * interface. Returns strings as-is. 
*/ +@InputStreamHandler public class StringPassResolver extends BasePlugin implements Resolver { // for write private final OneRow oneRow = new OneRow(); diff --git a/server/pxf-hdfs/src/test/java/org/greenplum/pxf/plugins/hdfs/AvroResolverTest.java b/server/pxf-hdfs/src/test/java/org/greenplum/pxf/plugins/hdfs/AvroResolverTest.java index 15e8cd7229..b3fdcc5762 100644 --- a/server/pxf-hdfs/src/test/java/org/greenplum/pxf/plugins/hdfs/AvroResolverTest.java +++ b/server/pxf-hdfs/src/test/java/org/greenplum/pxf/plugins/hdfs/AvroResolverTest.java @@ -30,6 +30,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -97,6 +98,49 @@ public void testSetFields_Primitive() { assertEquals("row1", genericRecord.get(6)); } + @Test + public void testSetFields_BytesWithByteArray() { + schema = getAvroSchemaForBytes(); + context.setMetadata(schema); + resolver.setRequestContext(context); + resolver.afterPropertiesSet(); + + List fields = new ArrayList<>(); + fields.add(new OneField(DataType.BYTEA.getOID(), new byte[]{(byte) 49})); + OneRow row = resolver.setFields(fields); + + assertNotNull(row); + Object data = row.getData(); + assertNotNull(data); + assertTrue(data instanceof GenericRecord); + GenericRecord genericRecord = (GenericRecord) data; + + // assert column values + assertEquals(ByteBuffer.wrap(new byte[]{(byte) 49}), genericRecord.get(0)); + } + + @Test + public void testSetFields_BytesWithByteBuffer() { + schema = getAvroSchemaForBytes(); + context.setMetadata(schema); + resolver.setRequestContext(context); + resolver.afterPropertiesSet(); + + List fields = new ArrayList<>(); + ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[]{(byte) 49}); + fields.add(new 
OneField(DataType.BYTEA.getOID(), byteBuffer)); + OneRow row = resolver.setFields(fields); + + assertNotNull(row); + Object data = row.getData(); + assertNotNull(data); + assertTrue(data instanceof GenericRecord); + GenericRecord genericRecord = (GenericRecord) data; + + // assert column values + assertSame(byteBuffer, genericRecord.get(0)); + } + @Test public void testSetFields_PrimitiveNulls() throws Exception { schema = getAvroSchemaForPrimitiveTypes(); @@ -161,6 +205,56 @@ public void testSetFields_Complex() { assertEquals("{key1:123456789,key2:234567890}", genericRecord.get(5)); } + /** + * Tests field processing when PXF RecordReader using GPDBWritable reads an array element and reports it + * as OneField with the TEXT OID. + */ + @Test + public void testSetFields_IntArrayWithTextOID() { + schema = getAvroSchemaForIntegerArray(); + context.setMetadata(schema); + resolver.setRequestContext(context); + resolver.afterPropertiesSet(); + + List fields = new ArrayList<>(); + fields.add(new OneField(DataType.TEXT.getOID(), "{1,2,3}")); + OneRow row = resolver.setFields(fields); + + assertNotNull(row); + Object data = row.getData(); + assertNotNull(data); + assertTrue(data instanceof GenericRecord); + GenericRecord genericRecord = (GenericRecord) data; + + // assert column values + assertEquals(Arrays.asList(1,2,3), genericRecord.get(0)); + } + + /** + * Tests field processing when PXF RecordReader using TextWritable (in FDW case) reads an array element and reports it + * as OneField with the INTARRAY OID. 
+ */ + @Test + public void testSetFields_IntArrayWithArrayOID() { + schema = getAvroSchemaForIntegerArray(); + context.setMetadata(schema); + resolver.setRequestContext(context); + resolver.afterPropertiesSet(); + + List fields = new ArrayList<>(); + fields.add(new OneField(DataType.INT4ARRAY.getOID(), "{1,2,3}")); + OneRow row = resolver.setFields(fields); + + assertNotNull(row); + Object data = row.getData(); + assertNotNull(data); + assertTrue(data instanceof GenericRecord); + GenericRecord genericRecord = (GenericRecord) data; + + // assert column values + assertEquals(Arrays.asList(1,2,3), genericRecord.get(0)); + } + @Test public void testGetFields_Primitive() throws Exception { List columnDescriptors = createColumnDescriptors(primitiveDataTypes); @@ -582,6 +676,27 @@ private Schema getAvroSchemaForPrimitiveTypes() { return schema; } + private Schema getAvroSchemaForBytes() { + Schema schema = Schema.createRecord("tableName", "", "public.avro", false); + List fields = new ArrayList<>(); + fields.add(new Schema.Field(Schema.Type.BYTES.getName(), Schema.create(Schema.Type.BYTES), "", null)); + schema.setFields(fields); + return schema; + } + + private Schema getAvroSchemaForIntegerArray() { + Schema schema = Schema.createRecord("tableName", "", "public.avro", false); + List fields = new ArrayList<>(); + fields.add(new Schema.Field( + Schema.Type.ARRAY.getName(), + Schema.createArray(Schema.create(Schema.Type.INT)), + "", + null) + ); + schema.setFields(fields); + return schema; + } + private Schema getAvroSchemaForComplexTypes() { Schema schema = Schema.createRecord("tableName", "", "public.avro", false); List fields = new ArrayList<>(); diff --git a/server/pxf-service/build.gradle b/server/pxf-service/build.gradle index 48bdc13a16..54953ec339 100644 --- a/server/pxf-service/build.gradle +++ b/server/pxf-service/build.gradle @@ -39,10 +39,12 @@ dependencies { implementation("org.apache.logging.log4j:log4j-spring-boot") 
implementation('org.springframework.boot:spring-boot-starter-actuator') implementation('io.micrometer:micrometer-registry-prometheus') + implementation("com.univocity:univocity-parsers") implementation("org.apache.hadoop:hadoop-hdfs-client") { transitive = false } implementation("org.apache.hadoop:hadoop-auth") { transitive = false } + /******************************* * These JARs below (and its transitive dependencies, other than txw2 [for writing XML docs]) are needed for Java 11 * jcenter doesn't have full com.sun.xml.bind:jaxb-core/jaxb-impl packages, using glassfish distro diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/BridgeInputBuilder.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/BridgeInputBuilder.java deleted file mode 100644 index f2622c513f..0000000000 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/BridgeInputBuilder.java +++ /dev/null @@ -1,72 +0,0 @@ -package org.greenplum.pxf.service; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.greenplum.pxf.api.GPDBWritableMapper; -import org.greenplum.pxf.api.OneField; -import org.greenplum.pxf.api.io.DataType; -import org.greenplum.pxf.api.io.GPDBWritable; -import org.greenplum.pxf.api.model.OutputFormat; - -import java.io.DataInput; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class BridgeInputBuilder { - - private static final Log LOG = LogFactory.getLog(BridgeInputBuilder.class); - - public List makeInput(Charset databaseEncoding, OutputFormat outputFormat, DataInput inputStream) throws Exception { - if (outputFormat == OutputFormat.TEXT) { - // Avoid copying the bytes from the inputStream directly. This - // code used to use the Text class to read bytes until a line - // delimiter was found. This would cause issues with wide rows that - // had 1MB+, because the Text code grows the array to fit data, and - // it does it inefficiently. We observed multiple calls to - // System.arraycopy in the setCapacity method for every byte after - // we exceeded the original buffer size. This caused terrible - // performance in PXF, even when writing a single row to an external - // system. 
- return Collections.singletonList(new OneField(DataType.BYTEA.getOID(), inputStream)); - } - - GPDBWritable gpdbWritable = new GPDBWritable(databaseEncoding); - gpdbWritable.readFields(inputStream); - - if (gpdbWritable.isEmpty()) { - LOG.debug("Reached end of stream"); - return null; - } - - GPDBWritableMapper mapper = new GPDBWritableMapper(gpdbWritable); - int[] colTypes = gpdbWritable.getColType(); - List record = new ArrayList<>(colTypes.length); - for (int i = 0; i < colTypes.length; i++) { - mapper.setDataType(colTypes[i]); - record.add(new OneField(colTypes[i], mapper.getData(i))); - } - return record; - } -} diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/HttpRequestParser.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/HttpRequestParser.java index e578402eba..da81c196ff 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/HttpRequestParser.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/HttpRequestParser.java @@ -2,6 +2,7 @@ import org.apache.commons.lang.StringUtils; import org.greenplum.pxf.api.error.PxfRuntimeException; +import org.greenplum.pxf.api.model.GreenplumCSV; import org.greenplum.pxf.api.model.OutputFormat; import org.greenplum.pxf.api.model.PluginConf; import org.greenplum.pxf.api.model.RequestContext; @@ -255,12 +256,30 @@ String getServerApiVersion() { } private void parseGreenplumCSV(RequestMap params, RequestContext context) { - context.getGreenplumCSV() - .withDelimiter(params.removeUserProperty("DELIMITER")) - .withEscapeChar(params.removeUserProperty("ESCAPE")) - .withNewline(params.removeUserProperty("NEWLINE")) - .withQuoteChar(params.removeUserProperty("QUOTE")) - .withValueOfNull(params.removeUserProperty("NULL")); + // TODO: produce one with different defaults for TEXT vs CSV + GreenplumCSV greenplumCSV = context.getGreenplumCSV(); + + // override default format options with values that are specified in the request + String delimiter = 
params.removeUserProperty("DELIMITER"); + if (delimiter != null) { + greenplumCSV.withDelimiter(delimiter); + } + String escape = params.removeUserProperty("ESCAPE"); + if (escape != null) { + greenplumCSV.withEscapeChar(escape); + } + String newline = params.removeUserProperty("NEWLINE"); + if (newline != null) { + greenplumCSV.withNewline(newline); + } + String quote = params.removeUserProperty("QUOTE"); + if (quote != null) { + greenplumCSV.withQuoteChar(quote); + } + String nullValue = params.removeUserProperty("NULL"); + if (nullValue != null) { + greenplumCSV.withValueOfNull(nullValue); + } } /** diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/Bridge.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/Bridge.java index 263e0e55f4..c6ae2ad108 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/Bridge.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/Bridge.java @@ -21,9 +21,6 @@ import org.greenplum.pxf.api.io.Writable; -import org.greenplum.pxf.api.model.Accessor; -import org.greenplum.pxf.api.model.Plugin; -import org.greenplum.pxf.api.model.Resolver; import java.io.DataInputStream; @@ -42,9 +39,24 @@ public interface Bridge { */ boolean beginIteration() throws Exception; + /** + * Reads new data from the external system and wraps it such that it can be written to an OutputStream. 
+ * @return the new data represented by the Writable object + * @throws Exception when an error occurs during the operation + */ Writable getNext() throws Exception; + /** + * Reads new data from the provided InputStream and sends it to the external system + * @param inputStream the input stream to read the data from + * @return true if the data was read and processed, false if there is no more data to read + * @throws Exception when an error occurs during the operation + */ boolean setNext(DataInputStream inputStream) throws Exception; + /** + * End the iteration for data access. Implementations need to close any underlying resources. + * @throws Exception when an error occurs during the operation + */ void endIteration() throws Exception; } diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/ReadBridge.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/ReadBridge.java index 075cf2b402..d6ce412f7d 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/ReadBridge.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/ReadBridge.java @@ -116,8 +116,9 @@ public Writable getNext() throws Exception { } /** - * Close the underlying resource + * {@inheritDoc} */ + @Override public void endIteration() throws Exception { try { accessor.closeForRead(); diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactory.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactory.java index 1cae454813..d555f6e510 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactory.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactory.java @@ -4,6 +4,7 @@ import org.greenplum.pxf.api.model.RequestContext; import org.greenplum.pxf.api.model.WriteVectorizedResolver; import org.greenplum.pxf.api.utilities.Utilities; +import 
org.greenplum.pxf.service.serde.RecordReaderFactory; import org.greenplum.pxf.service.utilities.BasePluginFactory; import org.greenplum.pxf.service.utilities.GSSFailureHandler; import org.springframework.stereotype.Component; @@ -12,10 +13,12 @@ public class SimpleBridgeFactory implements BridgeFactory { private final BasePluginFactory pluginFactory; + private final RecordReaderFactory recordReaderFactory; private final GSSFailureHandler failureHandler; - public SimpleBridgeFactory(BasePluginFactory pluginFactory, GSSFailureHandler failureHandler) { + public SimpleBridgeFactory(BasePluginFactory pluginFactory, RecordReaderFactory recordReaderFactory, GSSFailureHandler failureHandler) { this.pluginFactory = pluginFactory; + this.recordReaderFactory = recordReaderFactory; this.failureHandler = failureHandler; } @@ -28,9 +31,9 @@ public Bridge getBridge(RequestContext context) { Bridge bridge; if (context.getRequestType() == RequestContext.RequestType.WRITE_BRIDGE) { if (useWriteVectorization(context)) { - bridge = new WriteVectorizedBridge(pluginFactory, context, failureHandler); + bridge = new WriteVectorizedBridge(pluginFactory, recordReaderFactory, context, failureHandler); } else { - bridge = new WriteBridge(pluginFactory, context, failureHandler); + bridge = new WriteBridge(pluginFactory, recordReaderFactory, context, failureHandler); } } else if (context.getRequestType() != RequestContext.RequestType.READ_BRIDGE) { throw new UnsupportedOperationException("Current Operation is not supported"); diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteBridge.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteBridge.java index 734e06ec8e..92feeed7d8 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteBridge.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteBridge.java @@ -21,11 +21,12 @@ import org.greenplum.pxf.api.OneField; import 
org.greenplum.pxf.api.OneRow; -import org.greenplum.pxf.api.error.BadRecordException; import org.greenplum.pxf.api.io.Writable; +import org.greenplum.pxf.api.model.InputStreamHandler; import org.greenplum.pxf.api.model.OutputFormat; import org.greenplum.pxf.api.model.RequestContext; -import org.greenplum.pxf.service.BridgeInputBuilder; +import org.greenplum.pxf.service.serde.RecordReader; +import org.greenplum.pxf.service.serde.RecordReaderFactory; import org.greenplum.pxf.service.utilities.BasePluginFactory; import org.greenplum.pxf.service.utilities.GSSFailureHandler; @@ -33,22 +34,33 @@ import java.nio.charset.Charset; import java.util.List; -/* - * WriteBridge class creates appropriate accessor and resolver. - * It reads data from inputStream by the resolver, - * and writes it to the Hadoop storage with the accessor. +/** + * WriteBridge orchestrates writing data received from GPDB into an external system. It provides methods + * to start and stop the iteration process as well as the iterative method to read database table tuples + * from the input stream, transform them with a resolver and store them into the external system using an accessor. 
*/ public class WriteBridge extends BaseBridge { - protected final BridgeInputBuilder inputBuilder; protected final OutputFormat outputFormat; protected final Charset databaseEncoding; + protected final RecordReader recordReader; - public WriteBridge(BasePluginFactory pluginFactory, RequestContext context, GSSFailureHandler failureHandler) { + /** + * Creates a new instance + * @param pluginFactory factory for creating plugins + * @param recordReaderFactory factory for creating a record reader to deserialize incoming data + * @param context request context + * @param failureHandler failure handler for GSS errors + */ + public WriteBridge(BasePluginFactory pluginFactory, RecordReaderFactory recordReaderFactory, + RequestContext context, GSSFailureHandler failureHandler) { super(pluginFactory, context, failureHandler); - this.inputBuilder = new BridgeInputBuilder(); this.outputFormat = context.getOutputFormat(); this.databaseEncoding = context.getDatabaseEncoding(); + + // create record reader for incoming data deserialization + this.recordReader = recordReaderFactory.getRecordReader(context, + resolver.getClass().isAnnotationPresent(InputStreamHandler.class)); } /** @@ -60,14 +72,18 @@ public boolean beginIteration() throws Exception { return failureHandler.execute(context.getConfiguration(), "begin iteration", () -> accessor.openForWrite(), this::beforeRetryCallback); } - /* - * Read data from stream, convert it using Resolver into OneRow object, and - * pass to WriteAccessor to write into file. + /** + * Reads a record (usually a database table tuple) from the input stream using an InputBuilder, + * converts it into OneRow object (a representation suitable for the external system) using a Resolver, + * and stores it into the external system using an Accessor. 
+ * @param inputStream input stream containing data + * @return true if data was read and processed, false if there was no more data to read + * @throws Exception if any operation failed */ @Override public boolean setNext(DataInputStream inputStream) throws Exception { - List record = inputBuilder.makeInput(databaseEncoding, outputFormat, inputStream); + List record = recordReader.readRecord(inputStream); if (record == null) { return false; } @@ -76,14 +92,13 @@ public boolean setNext(DataInputStream inputStream) throws Exception { if (onerow == null) { return false; } - if (!accessor.writeNextObject(onerow)) { - throw new BadRecordException(); - } - return true; + + // if accessor fails to write data it should throw an exception, if nothing was written, then there's no more data + return accessor.writeNextObject(onerow); } - /* - * Close the underlying resource + /** + * {@inheritDoc} */ public void endIteration() throws Exception { try { diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteVectorizedBridge.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteVectorizedBridge.java index 947a5c251d..3451683af1 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteVectorizedBridge.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/bridge/WriteVectorizedBridge.java @@ -5,6 +5,7 @@ import org.greenplum.pxf.api.error.BadRecordException; import org.greenplum.pxf.api.model.RequestContext; import org.greenplum.pxf.api.model.WriteVectorizedResolver; +import org.greenplum.pxf.service.serde.RecordReaderFactory; import org.greenplum.pxf.service.utilities.BasePluginFactory; import org.greenplum.pxf.service.utilities.GSSFailureHandler; @@ -21,11 +22,13 @@ public class WriteVectorizedBridge extends WriteBridge { /** * Creates a new instance of the bridge. 
* @param pluginFactory plugin factory + * @param recordReaderFactory factory for creating a record reader to deserialize incoming data * @param context request context * @param failureHandler failure handler */ - public WriteVectorizedBridge(BasePluginFactory pluginFactory, RequestContext context, GSSFailureHandler failureHandler) { - super(pluginFactory, context, failureHandler); + public WriteVectorizedBridge(BasePluginFactory pluginFactory, RecordReaderFactory recordReaderFactory, + RequestContext context, GSSFailureHandler failureHandler) { + super(pluginFactory, recordReaderFactory, context, failureHandler); } /** @@ -42,7 +45,7 @@ public boolean setNext(DataInputStream inputStream) throws Exception { List> batch = new ArrayList<>(batchSize); int recordCount = 0; while (recordCount < batchSize) { - List record = inputBuilder.makeInput(databaseEncoding, outputFormat, inputStream); + List record = recordReader.readRecord(inputStream); if (record == null) { break; // no more records to read } diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/controller/WriteServiceImpl.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/controller/WriteServiceImpl.java index b1e332ae4c..7b686e1281 100644 --- a/server/pxf-service/src/main/java/org/greenplum/pxf/service/controller/WriteServiceImpl.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/controller/WriteServiceImpl.java @@ -47,7 +47,7 @@ public String writeData(RequestContext context, InputStream inputStream) throws } /** - * Reads the input stream, iteratively submits submits data from the stream to created bridge. + * Reads the input stream, iteratively submits data from the stream to created bridge. 
* * @param context request context * @param inputStream input stream diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/BaseRecordReader.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/BaseRecordReader.java new file mode 100644 index 0000000000..afda64a357 --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/BaseRecordReader.java @@ -0,0 +1,39 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.api.utilities.ColumnDescriptor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataInput; +import java.nio.charset.Charset; +import java.util.List; + +/** + * Base class for record readers, stores request context and a few commonly used properties from it. + */ +public abstract class BaseRecordReader implements RecordReader { + + protected final Logger LOG = LoggerFactory.getLogger(this.getClass()); + + protected final RequestContext context; + protected final List columnDescriptors; + protected final Charset databaseEncoding; + + /** + * Creates a new instance + * @param context request context + */ + public BaseRecordReader(RequestContext context) { + this.context = context; + columnDescriptors = context.getTupleDescription(); + databaseEncoding = context.getDatabaseEncoding(); + } + + /** + * {@inheritDoc} + */ + @Override + abstract public List readRecord(DataInput input) throws Exception; +} diff --git a/server/pxf-api/src/main/java/org/greenplum/pxf/api/GPDBWritableMapper.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableMapper.java similarity index 98% rename from server/pxf-api/src/main/java/org/greenplum/pxf/api/GPDBWritableMapper.java rename to server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableMapper.java index 6b1cbda2b2..49274ba72d 100644 --- 
a/server/pxf-api/src/main/java/org/greenplum/pxf/api/GPDBWritableMapper.java +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableMapper.java @@ -1,4 +1,4 @@ -package org.greenplum.pxf.api; +package org.greenplum.pxf.service.serde; /* * Licensed to the Apache Software Foundation (ASF) under one @@ -24,7 +24,7 @@ import org.greenplum.pxf.api.io.DataType; import org.greenplum.pxf.api.io.GPDBWritable; -/* +/** * Class for mapping GPDBWritable get functions to java types. */ public class GPDBWritableMapper { diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableRecordReader.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableRecordReader.java new file mode 100644 index 0000000000..14d2209a40 --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/GPDBWritableRecordReader.java @@ -0,0 +1,47 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.io.GPDBWritable; +import org.greenplum.pxf.api.model.RequestContext; + +import java.io.DataInput; +import java.util.ArrayList; +import java.util.List; + +/** + * Record reader that reads data from an input stream and deserializes database tuples encoded in GPDBWritable format. + */ +public class GPDBWritableRecordReader extends BaseRecordReader { + + /** + * Creates a new instance + * @param context request context + */ + public GPDBWritableRecordReader(RequestContext context) { + super(context); + } + + /** + * {@inheritDoc} + */ + @Override + public List readRecord(DataInput input) throws Exception { + GPDBWritable gpdbWritable = new GPDBWritable(databaseEncoding); + gpdbWritable.readFields(input); + + if (gpdbWritable.isEmpty()) { + LOG.debug("Reached end of stream"); + return null; + } + + // TODO: can mapper be initialized once the first time on initially (based on columnDescriptors) ? 
+ GPDBWritableMapper mapper = new GPDBWritableMapper(gpdbWritable); + int[] colTypes = gpdbWritable.getColType(); + List record = new ArrayList<>(colTypes.length); + for (int i = 0; i < colTypes.length; i++) { + mapper.setDataType(colTypes[i]); + record.add(new OneField(colTypes[i], mapper.getData(i))); + } + return record; + } +} diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReader.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReader.java new file mode 100644 index 0000000000..b9198728d2 --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReader.java @@ -0,0 +1,24 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; + +import java.io.DataInput; +import java.util.List; + +/** + * Interface for deserialization of an input stream with data from Greenplum into a List of OneField objects + * for downstream consumption by resolvers. The implementations of this interface deal with the actual + * specifics of how data is serialized by Greenplum PXF extension for different formatting specifications. + */ +public interface RecordReader { + + /** + * Reads the provided input stream received from GPDB and deserializes a database tuple according to the + * outputFormat specification. The tuple is deserialized into a List of OneField objects that are used by + * a downstream resolver to construct data representation appropriate for the external system. 
+ * @param input a data input stream + * @return a list of OneField objects, generally corresponding to columns of a database tuple + * @throws Exception if the operation fails + */ + List readRecord(DataInput input) throws Exception; +} diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReaderFactory.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReaderFactory.java new file mode 100644 index 0000000000..7a6a12c7f0 --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/RecordReaderFactory.java @@ -0,0 +1,61 @@ +package org.greenplum.pxf.service.serde; + +import com.google.common.base.Preconditions; +import org.greenplum.pxf.api.error.PxfRuntimeException; +import org.greenplum.pxf.api.model.OutputFormat; +import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.plugins.hdfs.utilities.PgUtilities; +import org.springframework.stereotype.Component; + +/** + * A factory that creates a new RecordReader to deserialize data from Greenplum. It looks into the information + * in the RequestContext to decide which particular RecordReader to create. + * This is a Spring Component that gets auto-wired into other Spring services. + */ +@Component +public class RecordReaderFactory { + + private final PgUtilities pgUtilities; + + /** + * Creates a new instance of the factory. + * @param pgUtilities utilities instance that helps with binary and array operations + */ + public RecordReaderFactory(PgUtilities pgUtilities) { + this.pgUtilities = pgUtilities; + } + + /** + * Creates a new RecordReader instance. The actual class implementing the RecordReader interface is decided + * by inspecting the outputFormat ('TEXT' or 'GPDBWritable') that the provided RequestContext contains. 
+ * @param context the request context + * @param canHandleInputStream true if the downstream resolver can handle an input stream, false otherwise + * @return a new RecordReader implementation + */ + public RecordReader getRecordReader(RequestContext context, boolean canHandleInputStream) { + OutputFormat outputFormat = context.getOutputFormat(); + Preconditions.checkNotNull(outputFormat, "outputFormat is not set in RequestContext"); + switch (outputFormat) { + case GPDBWritable: + return new GPDBWritableRecordReader(context); + case TEXT: + if (canHandleInputStream) { + /* + If downstream components (resolver / accessor) can handle an inputStream directly, use a shortcut + to avoid reading bytes from the inputStream here and instead pass the inputStream in the record. + This code used to use the Text class to read bytes until a line delimiter was found. This would cause + issues with wide rows that had 1MB+, because the Text code grows the array to fit data, and + it does so inefficiently. We observed multiple calls to System.arraycopy in the setCapacity method + for every byte after we exceeded the original buffer size. This caused terrible performance in PXF, + even when writing a single row to an external system. 
+ */ + return new StreamRecordReader(context); + } else { + return new TextRecordReader(context, pgUtilities); + } + default: + // in case there are more formats in the future and this class is not updated + throw new PxfRuntimeException("Unsupported output format " + context.getOutputFormat()); + } + } +} diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/StreamRecordReader.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/StreamRecordReader.java new file mode 100644 index 0000000000..e1af2b129f --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/StreamRecordReader.java @@ -0,0 +1,36 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.io.DataType; +import org.greenplum.pxf.api.model.RequestContext; + +import java.io.DataInput; +import java.util.Collections; +import java.util.List; + +/** + * A RecordReader that does not actually read data, but instead stores the whole input stream as the value of + * the first and only field of the resulting record. The data will be read by downstream components from the input + * stream directly. + * + * This is a performance optimization used, for example, by the StringPassResolver and LineBreakAccessor to not break + * the incoming stream into records and instead just copy incoming bytes to the external system. 
+ */ +public class StreamRecordReader extends BaseRecordReader { + + /** + * Creates a new instance + * @param context request context + */ + public StreamRecordReader(RequestContext context) { + super(context); + } + + /** + * {@inheritDoc} + */ + @Override + public List readRecord(DataInput input) { + return Collections.singletonList(new OneField(DataType.BYTEA.getOID(), input)); + } +} diff --git a/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/TextRecordReader.java b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/TextRecordReader.java new file mode 100644 index 0000000000..0c4cd3db7b --- /dev/null +++ b/server/pxf-service/src/main/java/org/greenplum/pxf/service/serde/TextRecordReader.java @@ -0,0 +1,219 @@ +package org.greenplum.pxf.service.serde; + +import com.univocity.parsers.common.ParsingContext; +import com.univocity.parsers.common.ResultIterator; +import com.univocity.parsers.common.fields.FieldSet; +import com.univocity.parsers.common.record.Record; +import com.univocity.parsers.common.record.RecordMetaData; +import com.univocity.parsers.conversions.Conversions; +import com.univocity.parsers.conversions.ObjectConversion; +import com.univocity.parsers.csv.CsvFormat; +import com.univocity.parsers.csv.CsvParser; +import com.univocity.parsers.csv.CsvParserSettings; +import org.apache.commons.lang.StringUtils; +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.error.PxfRuntimeException; +import org.greenplum.pxf.api.io.DataType; +import org.greenplum.pxf.api.model.GreenplumCSV; +import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.plugins.hdfs.utilities.PgUtilities; + +import java.io.DataInput; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * A RecordReader that reads data from an input stream and deserializes database tuples encoded in 
TEXT format. + */ +public class TextRecordReader extends BaseRecordReader { + + // max number of columns in a Greenplum table + // see MaxHeapAttributeNumber in https://github.com/greenplum-db/gpdb/blob/main/src/include/access/htup_details.h + private static final int MAX_COLUMNS = 1600; + + private final GreenplumCSV greenplumCSV; + private final CsvParser parser; + private final PgUtilities pgUtilities; + private ResultIterator iterator; + private int numColumns; + private int[] columnTypes; + private Class[] javaTypes; + private boolean initialized = false; + + /** + * Creates a new instance and sets up a CSV parser and its settings + * @param context request context + * @param pgUtilities an instance of utilities with helper methods for binary and array types + */ + public TextRecordReader(RequestContext context, PgUtilities pgUtilities) { + super(context); + this.pgUtilities = pgUtilities; + greenplumCSV = context.getGreenplumCSV(); // get the specification of CSV parameters + + // set parser settings based on data from greenplumCSV + CsvFormat csvFormat = new CsvFormat(); + csvFormat.setDelimiter(greenplumCSV.getDelimiter()); + csvFormat.setLineSeparator(greenplumCSV.getNewline()); + csvFormat.setQuote(greenplumCSV.getQuote()); + csvFormat.setQuoteEscape(greenplumCSV.getEscape()); + + // adjust parser setting to be appropriate for our on-the-wire format of CSV / TSV serialization + CsvParserSettings parserSettings = new CsvParserSettings(); + parserSettings.setFormat(csvFormat); + parserSettings.setCommentProcessingEnabled(false); // there should be no comments, do not waste time analyzing + parserSettings.setIgnoreLeadingWhitespaces(false); // do not remove any whitespaces + parserSettings.setIgnoreTrailingWhitespaces(false); // do not remove any whitespaces + parserSettings.setMaxColumns(MAX_COLUMNS); // align max columns with Greenplum spec + // we should've set maxCharsPerColumn value to 1GB (max size in GP) or larger (for multibyte UTF8 chars) + // but 
Univocity tries to allocate the buffer of this size ahead of time, which is very inefficient + // parserSettings.setMaxCharsPerColumn(Integer.MAX_VALUE); + + // create the CSV parser with desired settings + parser = new CsvParser(parserSettings); + + if (LOG.isDebugEnabled()) { + // replace new line and tab characters so that the log message takes only 1 line + LOG.debug("Configured CSV Parser : {}", csvFormat.toString().replaceAll("\n\t+", " | ")); + } + } + + private void initialize() { + // provide for handling of custom null values, if applicable + RecordMetaData metadata = parser.getRecordMetadata(); + String nullValue = greenplumCSV.getValueOfNull(); + if (StringUtils.isNotBlank(nullValue)) { + LOG.debug("Setting custom value of NULL to {}", nullValue); + FieldSet fieldSet = metadata.convertIndexes(Conversions.toNull(nullValue)); + fieldSet.set(IntStream.range(0, columnDescriptors.size()).boxed().collect(Collectors.toList())); + } + + // setup arrays for quick access to data types and java types by column index + numColumns = columnDescriptors.size(); + columnTypes = new int[numColumns]; + javaTypes = new Class[numColumns]; + FieldSet booleanFields = metadata.convertIndexes(Conversions.toBoolean("t", "f")); + FieldSet binaryFields = metadata.convertIndexes(new BinaryConversion()); + for (int columnIndex = 0; columnIndex < numColumns; columnIndex++) { + DataType dataType = columnDescriptors.get(columnIndex).getDataType(); + int columnType = dataType.getDeserializationType().getOID(); + columnTypes[columnIndex] = columnType; + javaTypes[columnIndex] = getJavaClass(dataType); + // process value conversions + switch (dataType) { + case BOOLEAN: + booleanFields.add(columnIndex); + break; + case BYTEA: + binaryFields.add(columnIndex); + break; + } + } + initialized = true; + } + + /** + * {@inheritDoc} + */ + @Override + public List readRecord(DataInput input) throws Exception { + if (iterator == null) { + iterator = parser.iterateRecords((InputStream) input, 
context.getDatabaseEncoding()).iterator(); + } + + if (!iterator.hasNext()) { + return null; // no more data to read + } + + // this can only be done after iterator read a record or called hasNext() + if (!initialized) { + initialize(); + } + + // parse a new record from the input stream + Record csvRecord = iterator.next(); + + // make sure the number of fields is the same as the number of columns + int numFields = csvRecord.getValues().length; + if (numFields != numColumns) { + throw new PxfRuntimeException( + String.format("Number of record fields %d is not equal to the number of table columns %d", + numFields, numColumns)); + } + + // create the target record to be returned + List record = new ArrayList<>(numColumns); + + // convert record to a List of OneField objects according to the column types + for (int columnIndex = 0; columnIndex < numColumns; columnIndex++) { + Object fieldValue = csvRecord.getValue(columnIndex, javaTypes[columnIndex]); + // Univocity cannot handle custom converter with null value, so we have to convert BYTEA here ourselves + if (columnTypes[columnIndex] == DataType.BYTEA.getOID() && fieldValue != null) { + fieldValue = pgUtilities.parseByteaLiteral((String) fieldValue); + } + record.add(new OneField(columnTypes[columnIndex], fieldValue)); + } + + return record; + } + + private Class getJavaClass(DataType dataType) { + // only very specific numeric types will get their own functions + // all other data types are considered as Strings + switch (dataType) { + case BOOLEAN: + return Boolean.class; + // this section below should've been here, but since I could not get univocity to return null value properly + // for the custom BinaryConversion, we treat BYTEA as a String here and will do parsing in readRecord method + // case BYTEA: + // return ByteBuffer.class; + case BIGINT: + return Long.class; + case SMALLINT: + return Short.class; + case INTEGER: + return Integer.class; + case REAL: + return Float.class; + case FLOAT8: + return 
Double.class; + default: + // everything else was serialized as a string and will be further converted by a resolver + return String.class; + } + } + + /** + * Converts Strings that contain Greenplum binary data in escape format to ByteBuffers + */ + public class BinaryConversion extends ObjectConversion { + + /** + * Creates a Conversion from String to ByteBuffer with default values to return when the input is null. + * This default constructor assumes the output of a conversion should be null when input is null + */ + public BinaryConversion() { + super(); + } + + /** + * Creates a Conversion from String to ByteBuffer with default values to return when the input is null. + * @param valueIfStringIsNull default ByteBuffer value to be returned when the input String is null. Used when {@link ObjectConversion#execute(String)} is invoked. + * @param valueIfObjectIsNull default String value to be returned when a ByteBuffer input is null. Used when {@code revert(ByteBuffer)} is invoked. + */ + public BinaryConversion(ByteBuffer valueIfStringIsNull, String valueIfObjectIsNull) { + super(valueIfStringIsNull, valueIfObjectIsNull); + } + + /** + * Converts a String to Byte. + */ + @Override + protected ByteBuffer fromString(String input) { + return input == null ? null : pgUtilities.parseByteaLiteral(input); + } + } +} diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/BridgeInputBuilderTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/BridgeInputBuilderTest.java deleted file mode 100644 index 66f1353ce3..0000000000 --- a/server/pxf-service/src/test/java/org/greenplum/pxf/service/BridgeInputBuilderTest.java +++ /dev/null @@ -1,150 +0,0 @@ -package org.greenplum.pxf.service; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - -import org.apache.commons.io.IOUtils; -import org.greenplum.pxf.api.OneField; -import org.greenplum.pxf.api.io.DataType; -import org.greenplum.pxf.api.model.OutputFormat; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Test; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.List; - -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; - -public class BridgeInputBuilderTest { - BridgeInputBuilder inputBuilder; - DataInputStream inputStream; - - @Test - /* - * Test makeInput method: small \n terminated input - */ - public void makeInput() throws Exception { - - byte[] data = new byte[]{ - (int) 'a', - (int) 'b', - (int) 'c', - (int) 'd', - (int) '\n', - (int) 'n', - (int) 'o', - (int) '\n'}; - - prepareInput(data); - - List record = inputBuilder.makeInput(StandardCharsets.UTF_8, OutputFormat.TEXT, inputStream); - - // the inputStream is exhausted completely, so we check line breaks too - verifyRecord(record, Arrays.copyOfRange(data, 0, 8)); - } - - @Test - /* - * Test the makeInput method: input > buffer size, \n terminated - */ - public void 
makeInputBigArray() throws Exception { - - byte[] bigArray = new byte[2000]; - for (int i = 0; i < 1999; ++i) { - bigArray[i] = (byte) (i % 10 + 30); - } - bigArray[1999] = (byte) '\n'; - - prepareInput(bigArray); - - List record = inputBuilder.makeInput(StandardCharsets.UTF_8, OutputFormat.TEXT, inputStream); - - verifyRecord(record, bigArray); - } - - @Test - /* - * Test the makeInput method: input > buffer size, no \n - */ - public void makeInputBigArrayNoNewLine() throws Exception { - - byte[] bigArray = new byte[2000]; - for (int i = 0; i < 2000; ++i) { - bigArray[i] = (byte) (i % 10 + 60); - } - - prepareInput(bigArray); - - List record = inputBuilder.makeInput(StandardCharsets.UTF_8, OutputFormat.TEXT, inputStream); - - verifyRecord(record, bigArray); - } - - @Test - /* - * Test the makeInput method: empty stream (returns -1) - */ - public void makeInputEmptyStream() throws Exception { - - byte[] empty = new byte[0]; - - prepareInput(empty); - - List record = inputBuilder.makeInput(StandardCharsets.UTF_8, OutputFormat.TEXT, inputStream); - - verifyRecord(record, empty); - } - - /* - * helpers functions - */ - - @AfterEach - public void cleanUp() throws IOException { - if (inputStream != null) { - inputStream.close(); - } - } - - private void prepareInput(byte[] data) { - inputBuilder = new BridgeInputBuilder(); - inputStream = new DataInputStream(new ByteArrayInputStream(data)); - } - - private void verifyRecord(List record, byte[] expected) throws IOException { - assertEquals(record.size(), 1); - - OneField field = record.get(0); - assertEquals(field.type, DataType.BYTEA.getOID()); - assertTrue(field.val instanceof InputStream); - - byte[] bytes = IOUtils.toByteArray((InputStream) field.val); - byte[] result = Arrays.copyOfRange(bytes, 0, bytes.length); - assertEquals(result.length, expected.length); - assertArrayEquals(result, expected); - } -} diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/GPDataGenerator.java 
b/server/pxf-service/src/test/java/org/greenplum/pxf/service/GPDataGenerator.java new file mode 100644 index 0000000000..872dc842bd --- /dev/null +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/GPDataGenerator.java @@ -0,0 +1,308 @@ +package org.greenplum.pxf.service; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.Getter; +import org.apache.commons.codec.binary.Hex; +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.io.DataType; +import org.greenplum.pxf.api.utilities.ColumnDescriptor; + +import java.io.IOException; +import java.io.PrintStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.StringJoiner; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * This is a utility class that has a main method and assertion functions to work with sample datasets. + * + * The main method generates a 'sample_data.sql' file in 'src/test/resources' directory. The file contains: + * - a DDL statement for a definition of the Greenplum table 'sample_data' to hold sample data + * - DML statements to insert the sample dataset into the 'sample_data' table + * - DML statements to COPY the sample dataset to a set of CSV and TEXT files in 'src/test/resources' + * + * The sample dataset includes all types supported by PXF. There is at least one row for every datatype with + * NULL value. + * + * The assertion methods compare values of a deserialized dataset with the original values produced by the class. 
+ */ +public class GPDataGenerator { + + private static final String dir = System.getenv("HOME") + "/workspace/pxf/server/pxf-service/src/test/resources/data/"; + + /** + * Enum representing different formats used to export the sample data set into a file using Greenplum COPY command. + */ + public enum FORMAT { + CSV("sample_data.csv", ',', null, '"', '"'), + // for TEXT quote is not a concept, leave double quote character for constructor parameter + TEXT("sample_data.txt", '\t', "\\N",'"', '\\'), + CSV_PIPE("sample_data_pipe.csv", '|', null,'"', '"'); + + @Getter + private final String filename; + @Getter + private final Character delimiter; + @Getter + private final String nil; + @Getter + private final Character quote; + @Getter + private final Character escape; + + /** + * Constructor, creates a new instance of the enum + * @param filename name of the file where data will be exported to + * @param delimiter field delimiter character + * @param nil string representation of value of NULL + * @param quote quote character + * @param escape escape character + */ + FORMAT(String filename, Character delimiter, String nil, Character quote, Character escape) { + this.filename = filename; + this.delimiter = delimiter; + this.nil = nil; + this.quote = quote; + this.escape = escape; + } + } + + /** + * A POJO to capture all metadata about a column of a dataset. 
+ */ + @Data + @AllArgsConstructor + private static class Column { + String name; // name of the column , used to create SQL DDL + String sqlType; // SQL type of the column , used to create SQL DDL + DataType type; // DataType of the column , used to produce List for mocking + DataType deserializedType; // DataType of the deserialized value, used in verification of deserialized OneField types + Class clazz; // Java class of the value , used in verification of deserialized OneField values + } + + // the schema of the sample dataset as an array of Column types + private static final Column[] COLUMNS = { + new Column("id" ,"integer" , DataType.INTEGER , DataType.INTEGER , Integer.class ), + new Column("name" ,"text" , DataType.TEXT , DataType.TEXT , String.class ), + new Column("sml" ,"smallint" , DataType.SMALLINT , DataType.SMALLINT, Short.class ), + new Column("integ" ,"integer" , DataType.INTEGER , DataType.INTEGER , Integer.class ), + new Column("bg" ,"bigint" , DataType.BIGINT , DataType.BIGINT , Long.class ), + new Column("r" ,"real" , DataType.REAL , DataType.REAL , Float.class ), + new Column("dp" ,"double precision" , DataType.FLOAT8 , DataType.FLOAT8 , Double.class ), + new Column("dec" ,"numeric" , DataType.NUMERIC , DataType.TEXT , String.class ), + new Column("bool" ,"boolean" , DataType.BOOLEAN , DataType.BOOLEAN , Boolean.class ), + new Column("cdate" ,"date" , DataType.DATE , DataType.TEXT , String.class ), + new Column("ctime" ,"time" , DataType.TIME , DataType.TEXT , String.class ), + new Column("tm" ,"timestamp without time zone", DataType.TIMESTAMP , DataType.TEXT , String.class ), + new Column("tmz" ,"timestamp with time zone" , DataType.TIMESTAMP_WITH_TIME_ZONE, DataType.TEXT , String.class ), + new Column("c1" ,"character(3)" , DataType.BPCHAR , DataType.TEXT , String.class ), + new Column("vc1" ,"character varying(5)" , DataType.VARCHAR , DataType.TEXT , String.class ), + new Column("bin" ,"bytea" , DataType.BYTEA , DataType.BYTEA , 
ByteBuffer.class), + new Column("bool_arr" ,"boolean[]" , DataType.BOOLARRAY , DataType.TEXT , String.class ), + new Column("int2_arr" ,"smallint[]" , DataType.INT2ARRAY , DataType.TEXT , String.class ), + new Column("int_arr" ,"int[]" , DataType.INT4ARRAY , DataType.TEXT , String.class ), + new Column("int8_arr" ,"bigint[]" , DataType.INT8ARRAY , DataType.TEXT , String.class ), + new Column("float_arr" ,"real[]" , DataType.FLOAT4ARRAY , DataType.TEXT , String.class ), + new Column("float8_arr" ,"float[]" , DataType.FLOAT8ARRAY , DataType.TEXT , String.class ), + new Column("numeric_arr" ,"numeric[]" , DataType.NUMERICARRAY , DataType.TEXT , String.class ), + new Column("text_arr" ,"text[]" , DataType.TEXTARRAY , DataType.TEXT , String.class ), + new Column("bytea_arr" ,"bytea[]" , DataType.BYTEAARRAY , DataType.TEXT , String.class ), + new Column("char_arr" ,"bpchar(5)[]" , DataType.BPCHARARRAY , DataType.TEXT , String.class ), + new Column("varchar_arr" ,"varchar(5)[]" , DataType.VARCHARARRAY , DataType.TEXT , String.class ) + }; + + // the schema of the sample dataset as a list of ColumnDescriptor objects to use in mocking the RequestContext + public static final List COLUMN_DESCRIPTORS = IntStream + .range(0, COLUMNS.length) + .mapToObj(i -> new ColumnDescriptor(COLUMNS[i].getName(), COLUMNS[i].getType().getOID(), i, COLUMNS[i].sqlType, null)) + .collect(Collectors.toList()); + + private final List> table; // rows and columns that contain the sample dataset + + /** + * Constructor, creates a sample dataset in memory and holds it in the private field 'table'. 
+ */ + public GPDataGenerator() { + this.table = new LinkedList<>(); + addRow(0, -1); // add the first row without any nulls, id column is never null + for (int row = 1; row < COLUMNS.length; row++) { + addRow(row, row); // add a row with null value for the same column index as the row index + } + } + + /** + * Helper method that generates values for all the columns of a new row with a given index and then + * adds the created row to the sample dataset. + * @param rowIndex index of the row + * @param nullColumnIndex index of the column to set to NULL value + */ + private void addRow(int rowIndex, int nullColumnIndex) { + // the values represent the result of CSV parsing of a row produced by Greenplum COPY command + // short/int/long/float/double and boolean values will be native Java types, bytea will be ByteBuffer + // the rest including NUMERIC and all arrays will be Strings + List row = new ArrayList<>(COLUMNS.length); + row.add(rowIndex); // 0 "id integer" + row.add(String.format("row-\"|%02d|\"", rowIndex)); // 1 "name text" + row.add((short) rowIndex); // 2 "sml smallint" + row.add(1000000 + rowIndex); // 3 "integ integer" + row.add(5555500000L + rowIndex); // 4 "bg bigint" + row.add(rowIndex + 0.0001f); // 5 "r real" + row.add(3.14159265358979d); // 6 "dp double precision" + row.add(String.format("12345678900000.00000%s", rowIndex)); // 7 "dec numeric" + row.add(rowIndex % 2 != 0); // 8 "bool boolean" + row.add(String.format("2010-01-%02d", (rowIndex % 30) + 1)); // 9 "cdate date" + row.add(String.format("10:11:%02d", rowIndex % 60)); // 10 "ctime time" + row.add(String.format("2013-07-13 21:00:05.%03d456", rowIndex % 1000)); // 11 "tm timestamp without time zone" + row.add(String.format("2013-07-13 21:00:05.%03d123-07", rowIndex % 1000)); // 12 "tmz timestamp with time zone" + row.add("abc"); // 13 "c1 character(3)" + row.add(" def "); // 14 "vc1 character varying(5)" + row.add(ByteBuffer.wrap(("b-" + rowIndex).getBytes(StandardCharsets.UTF_8))); // 15 "bin 
bytea" + + row.add(String.format("{t,f}", rowIndex)); // 16 "bool_arr boolean[]" + row.add(String.format("{1,2,3}", rowIndex)); // 17 "int2_arr smallint[]" + row.add(String.format("{1000000,2000000}", rowIndex)); // 18 "int_arr int[]" + row.add(String.format("{7777700000,7777700001}", rowIndex)); // 19 "int8_arr bigint[]" + row.add(String.format("{123.456,789.012}", rowIndex)); // 20 "float_arr real[]" + row.add(String.format("{123.456789,789.123456}", rowIndex)); // 21 "float8_arr float[]" + row.add(String.format("{12345678900000.000001,12345678900000.000001}", rowIndex)); // 22 "numeric_arr numeric[]" + row.add(String.format("{hello,world}", rowIndex)); // 23 "text_arr text[]" + row.add(String.format("{11,12}", rowIndex)); // 24 "bytea_arr bytea[]" + row.add(String.format("{abc,defij}", rowIndex)); // 25 "char_arr bpchar(5)[]" + row.add(String.format("{abcde,fijkl}", rowIndex)); // 26 "varchar_arr varchar(5)[]" + + // overwrite null index + if (nullColumnIndex >=0) { + row.set(nullColumnIndex, null); + } + table.add(row); + } + + /** + * Asserts that a given set of deserialized data represented by a collection of OneField objects contains all data + * in the sample dataset with correct values. 
+ * @param rows a list of lists of OneField object - list of rows where each row is a list of column values + * where each column value is a OneField object with type and value produced by a deserializer + */ + public static void assertDataSet(List> rows) { + // produce the sample dataset in memory and compare its size with the dataset provided + GPDataGenerator generator = new GPDataGenerator(); + assertEquals(generator.table.size(), rows.size(), "Sizes of the datasets do not match"); + + // iterate over rows of the provided dataset + Iterator> tableIterator = generator.table.iterator(); + int row = 0; + for (List serializedRow : rows) { + List cells = tableIterator.next(); // pick a corresponding row from a sample dataset + // iterate over columns of a row of the provided dataset + int col = 0; + for (OneField field : serializedRow) { + // check the deserialized datatype + assertEquals(COLUMNS[col].getDeserializedType().getOID(), field.type, String.format("Type mismatch row=%d col=%d", row, col)); + // check Java type / value + if (cells.get(col) == null) { + assertNull(field.val, String.format("Expected null in row=%d col=%d", row, col)); + } else { + if (COLUMNS[col].getClazz() == ByteBuffer.class) { + assertTrue(ByteBuffer.class.isAssignableFrom(field.val.getClass())); + } else { + assertSame(COLUMNS[col].getClazz(), field.val.getClass(), String.format("Java class mismatch row=%d col=%d", row, col)); + } + // make adjustments to expected values + Object expected = cells.get(col); + if (col == 25) { // col 25 is char_arr bpchar(5)[], we need to account for the trailing whitespace + expected = "{\"abc \",defij}"; + } + assertEquals(expected, field.val, String.format("Value mismatch row=%d col=%d", row, col)); + } + col++; + } + row++; + } + } + + /** + * Main method that generates '$HOME/workspace/pxf/server/pxf-service/src/test/resources/data/sample_data.sql' file + * with DDL and DML statements to create the sample dataset in a Greenplum table and copy it to a 
set of files + * in different formats using the Greenplum COPY command. + * @param args program arguments, not used + */ + public static void main(String[] args) { + GPDataGenerator generator = new GPDataGenerator(); + try (PrintStream output = new PrintStream(dir + "/sample_data.sql")) { + generator.printTableDDL(output); + generator.printInsertDataDML(output); + generator.printCopyDataDML(output); + output.flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Helper function to print the table DDL to the provided PrintStream. + * @param out print stream to print output to + */ + private void printTableDDL(PrintStream out) { + // print out SQL DDL statements + out.println("DROP TABLE IF EXISTS sample_data;"); + StringJoiner createTable = new StringJoiner(", ", "CREATE TABLE sample_data (", ") DISTRIBUTED BY (id);"); + for (Column column : COLUMNS) { + createTable.add(column.getName() + " " + column.getSqlType()); + } + out.println(createTable); + } + + /** + * Helper function to print the table INSERT DML to the provided PrintStream. + * @param out print stream to print output to + */ + private void printInsertDataDML(PrintStream out) { + // print out SQL DML statements + for (List row : table) { + StringJoiner insertRow = new StringJoiner(", ", "INSERT INTO sample_data VALUES (", ");"); + for (Object column : row) { + if (column == null) { + insertRow.add("NULL"); + } else if (column instanceof String) { + insertRow.add("'" + column + "'"); + } else if (column instanceof ByteBuffer) { + insertRow.add(String.format("'\\x%s'", Hex.encodeHexString((ByteBuffer) column))); + } else { + insertRow.add(column.toString()); + } + } + out.println(insertRow); + } + } + + /** + * Helper function to print the table COPY DDL to the provided PrintStream. 
+ * @param out print stream to print output to + */ + private void printCopyDataDML(PrintStream out) { + // print out SQL DML statements + // use PSQL variables to dynamically determine the user's home directory and absolute path of the file + out.println("\\set data_dir `echo $HOME/workspace/pxf/server/pxf-service/src/test/resources/data/`"); + out.println("\\set txt_file :data_dir 'sample_data.txt'"); + out.println("\\set csv_file :data_dir 'sample_data.csv'"); + out.println("\\set pipe_csv_file :data_dir 'sample_data_pipe.csv'"); + + // use CTAS with ORDER BY to ensure an ordered set of rows in the output file + out.println("COPY (SELECT * FROM sample_data ORDER BY id) TO :'txt_file';"); + out.println("COPY (SELECT * FROM sample_data ORDER BY id) TO :'csv_file' CSV;"); + out.println("COPY (SELECT * FROM sample_data ORDER BY id) TO :'pipe_csv_file' CSV DELIMITER '|';"); + } + +} diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactoryTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactoryTest.java index 35f1ce8f5e..c3a5554708 100644 --- a/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactoryTest.java +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/SimpleBridgeFactoryTest.java @@ -3,6 +3,7 @@ import org.greenplum.pxf.api.model.GreenplumCSV; import org.greenplum.pxf.api.model.RequestContext; import org.greenplum.pxf.api.utilities.EnumAggregationType; +import org.greenplum.pxf.service.serde.RecordReaderFactory; import org.greenplum.pxf.service.utilities.BasePluginFactory; import org.greenplum.pxf.service.utilities.GSSFailureHandler; import org.junit.jupiter.api.BeforeEach; @@ -22,6 +23,8 @@ public class SimpleBridgeFactoryTest { @Mock private BasePluginFactory mockPluginFactory; @Mock + private RecordReaderFactory mockRecordReaderFactory; + @Mock private GSSFailureHandler mockFailureHandler; @Mock private RequestContext 
mockRequestContext; @@ -33,13 +36,17 @@ public class SimpleBridgeFactoryTest { @BeforeEach public void setup() { - factory = new SimpleBridgeFactory(mockPluginFactory, mockFailureHandler); + factory = new SimpleBridgeFactory(mockPluginFactory, mockRecordReaderFactory, mockFailureHandler); } @Test public void testWriteVectorized() { when(mockRequestContext.getRequestType()).thenReturn(RequestContext.RequestType.WRITE_BRIDGE); when(mockRequestContext.getResolver()).thenReturn("org.greenplum.pxf.service.bridge.TestWriteVectorizedResolver"); + when(mockPluginFactory.getPlugin(mockRequestContext, null)).thenReturn(null); // accessor + // resolver will be inspected for annotation, so we need to have a real object here + when(mockPluginFactory.getPlugin(mockRequestContext, "org.greenplum.pxf.service.bridge.TestWriteVectorizedResolver")) + .thenReturn(new TestWriteVectorizedResolver()); bridge = factory.getBridge(mockRequestContext); assertTrue(bridge instanceof WriteVectorizedBridge); } @@ -48,6 +55,10 @@ public void testWriteVectorized() { public void testWrite() { when(mockRequestContext.getRequestType()).thenReturn(RequestContext.RequestType.WRITE_BRIDGE); when(mockRequestContext.getResolver()).thenReturn("org.greenplum.pxf.service.bridge.TestResolver"); + when(mockPluginFactory.getPlugin(mockRequestContext, null)).thenReturn(null); // accessor + // resolver will be inspected for annotation, so we need to have a real object here + when(mockPluginFactory.getPlugin(mockRequestContext, "org.greenplum.pxf.service.bridge.TestResolver")) + .thenReturn(new TestResolver()); bridge = factory.getBridge(mockRequestContext); assertTrue(bridge instanceof WriteBridge); assertFalse(bridge instanceof WriteVectorizedBridge); diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/WriteBridgeTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/WriteBridgeTest.java index 1cd1016f1a..957bc5a1c4 100644 --- 
a/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/WriteBridgeTest.java +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/bridge/WriteBridgeTest.java @@ -3,6 +3,7 @@ import org.apache.hadoop.conf.Configuration; import org.greenplum.pxf.api.model.Accessor; import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.service.serde.RecordReaderFactory; import org.greenplum.pxf.service.utilities.BasePluginFactory; import org.greenplum.pxf.service.utilities.GSSFailureHandler; import org.junit.jupiter.api.BeforeEach; @@ -34,6 +35,8 @@ public class WriteBridgeTest { @Mock private BasePluginFactory mockPluginFactory; @Mock + private RecordReaderFactory mockRecordReaderFactory; + @Mock private Accessor mockAccessor1; @Mock private Accessor mockAccessor2; @@ -47,8 +50,8 @@ public void setup() { configuration = new Configuration(); configuration.set("hadoop.security.authentication", "kerberos"); context.setConfiguration(configuration); - context.setAccessor("org.greenplum.pxf.api.model.Accessor"); - context.setAccessor("org.greenplum.pxf.api.model.Resolver"); + context.setAccessor("org.greenplum.pxf.service.bridge.TestAccessor"); + context.setResolver("org.greenplum.pxf.service.bridge.TestResolver"); } @Test @@ -57,7 +60,7 @@ public void testBeginIterationFailureNoRetries() throws Exception { when(mockAccessor1.openForWrite()).thenThrow(new IOException("Something Else")); // constructor will call into mock factories, that's why we do not create WriteBridge in @Before method - bridge = new WriteBridge(mockPluginFactory, context, handler); + bridge = createWriteBridge(); Exception e = assertThrows(IOException.class, () -> bridge.beginIteration()); assertEquals("Something Else", e.getMessage()); @@ -76,7 +79,7 @@ public void testBeginIterationGSSFailureRetriedOnce() throws Exception { when(mockAccessor2.openForWrite()).thenReturn(true); // constructor will call into mock factories, that's why we do not create WriteBridge in @Before 
method - bridge = new WriteBridge(mockPluginFactory, context, handler); + bridge = createWriteBridge(); boolean result = bridge.beginIteration(); assertTrue(result); @@ -100,7 +103,7 @@ public void testBeginIterationGSSFailureRetriedTwice() throws Exception { when(mockAccessor3.openForWrite()).thenReturn(true); // constructor will call into mock factories, that's why we do not create WriteBridge in @Before method - bridge = new WriteBridge(mockPluginFactory, context, handler); + bridge = createWriteBridge(); boolean result = bridge.beginIteration(); assertTrue(result); @@ -126,7 +129,7 @@ public void testBeginIterationGSSFailureAfterMaxRetries() throws Exception { when(mockAccessor3.openForWrite()).thenThrow(new IOException("GSS initiate failed")); // constructor will call into mock factories, that's why we do not create WriteBridge in @Before method - bridge = new WriteBridge(mockPluginFactory, context, handler); + bridge = createWriteBridge(); Exception e = assertThrows(IOException.class, () -> bridge.beginIteration()); assertEquals("GSS initiate failed", e.getMessage()); @@ -142,9 +145,17 @@ public void testBeginIterationGSSFailureAfterMaxRetries() throws Exception { @Test public void testGetNextIsNotSupported() { - bridge = new WriteBridge(mockPluginFactory, context, handler); + when(mockPluginFactory.getPlugin(context, context.getAccessor())).thenReturn(mockAccessor1); + bridge = createWriteBridge(); Exception e = assertThrows(UnsupportedOperationException.class, () -> bridge.getNext()); assertEquals("Current operation is not supported", e.getMessage()); } + + private WriteBridge createWriteBridge() { + // resolver will be inspected for annotation, so we need to have a real object here + when(mockPluginFactory.getPlugin(context, "org.greenplum.pxf.service.bridge.TestResolver")) + .thenReturn(new TestResolver()); + return new WriteBridge(mockPluginFactory, mockRecordReaderFactory, context, handler); + } } diff --git 
a/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/RecordReaderFactoryTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/RecordReaderFactoryTest.java new file mode 100644 index 0000000000..33120ee85c --- /dev/null +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/RecordReaderFactoryTest.java @@ -0,0 +1,49 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.model.OutputFormat; +import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.plugins.hdfs.utilities.PgUtilities; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class RecordReaderFactoryTest { + + private RecordReaderFactory factory; + private RequestContext context; + + @BeforeEach + public void before() { + context = new RequestContext(); + factory = new RecordReaderFactory(new PgUtilities()); + } + + @Test + public void testGetGPDBWritableReader() { + context.setOutputFormat(OutputFormat.GPDBWritable); + assertTrue(factory.getRecordReader(context, false) instanceof GPDBWritableRecordReader); + assertTrue(factory.getRecordReader(context, true) instanceof GPDBWritableRecordReader); + } + + @Test + public void testGetStreamRecordReader() { + context.setOutputFormat(OutputFormat.TEXT); + assertTrue(factory.getRecordReader(context, true) instanceof StreamRecordReader); + } + + @Test + public void testGetTextRecordReader() { + context.setOutputFormat(OutputFormat.TEXT); + assertTrue(factory.getRecordReader(context, false) instanceof TextRecordReader); + } + + @Test + public void testGetReaderErrorNoOutputFormat() { + context.setOutputFormat(null); + Throwable thrown = assertThrows(NullPointerException.class, () -> factory.getRecordReader(context, false)); + assertEquals("outputFormat is 
not set in RequestContext", thrown.getMessage()); + } +} diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/StreamRecordReaderTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/StreamRecordReaderTest.java new file mode 100644 index 0000000000..117984fdca --- /dev/null +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/StreamRecordReaderTest.java @@ -0,0 +1,34 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.io.DataType; +import org.greenplum.pxf.api.model.RequestContext; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.io.DataInputStream; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.mockito.Mockito.verifyNoInteractions; + +@ExtendWith(MockitoExtension.class) +public class StreamRecordReaderTest { + @Mock + private DataInputStream mockInputStream; + + @Test + public void testReadRecord() throws Exception { + StreamRecordReader reader = new StreamRecordReader(new RequestContext()); + List record = reader.readRecord(mockInputStream); + assertNotNull(record); + assertEquals(1, record.size()); + assertEquals(DataType.BYTEA.getOID(), record.get(0).type); + assertSame(mockInputStream, record.get(0).val); + verifyNoInteractions(mockInputStream); // no reading should actually happen + } +} diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/TextRecordReaderTest.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/TextRecordReaderTest.java new file mode 100644 index 0000000000..0d5b46cea1 --- /dev/null +++ 
b/server/pxf-service/src/test/java/org/greenplum/pxf/service/serde/TextRecordReaderTest.java @@ -0,0 +1,83 @@ +package org.greenplum.pxf.service.serde; + +import org.greenplum.pxf.api.OneField; +import org.greenplum.pxf.api.model.GreenplumCSV; +import org.greenplum.pxf.api.model.RequestContext; +import org.greenplum.pxf.plugins.hdfs.utilities.PgUtilities; +import org.greenplum.pxf.service.GPDataGenerator; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.io.DataInput; +import java.io.DataInputStream; +import java.nio.charset.StandardCharsets; +import java.util.LinkedList; +import java.util.List; + +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +public class TextRecordReaderTest { + + private TextRecordReader reader; + private List> rows; + + @Mock + private RequestContext mockRequestContext; + @Mock + private GreenplumCSV mockGreenplumCSV; + + @BeforeEach + private void before() { + rows = new LinkedList<>(); + } + + @Test + public void testReadCSVFile() throws Exception { + runScenario(GPDataGenerator.FORMAT.CSV); + } + + @Test + public void testReadTextFile() throws Exception { + runScenario(GPDataGenerator.FORMAT.TEXT); + } + + @Test + public void testReadPipeCSVFile() throws Exception { + runScenario(GPDataGenerator.FORMAT.CSV_PIPE); + } + + /** + * Run the test scenario where the sample data of a given format is read from a previously generated file, + * deserialized by the TextRecordReader and then compared with the original data values in Java object format. 
+ * @param format format of data + * @throws Exception if a problem occurs when reading data + */ + private void runScenario(GPDataGenerator.FORMAT format) throws Exception { + when(mockRequestContext.getDatabaseEncoding()).thenReturn(StandardCharsets.UTF_8); + when(mockRequestContext.getGreenplumCSV()).thenReturn(mockGreenplumCSV); + when(mockGreenplumCSV.getNewline()).thenReturn("\n"); + when(mockGreenplumCSV.getDelimiter()).thenReturn(format.getDelimiter()); + when(mockGreenplumCSV.getQuote()).thenReturn(format.getQuote()); + when(mockGreenplumCSV.getEscape()).thenReturn(format.getEscape()); + when(mockGreenplumCSV.getValueOfNull()).thenReturn(format.getNil()); + when(mockRequestContext.getTupleDescription()).thenReturn(GPDataGenerator.COLUMN_DESCRIPTORS); + reader = new TextRecordReader(mockRequestContext, new PgUtilities()); + + // read data from the input stream backed by the file with sample data previously generated by the GPDataGenerator + DataInput input = new DataInputStream(getClass().getClassLoader().getResourceAsStream("data/" + format.getFilename())); + List record = reader.readRecord(input); + while (record != null) { + rows.add(record); + record = reader.readRecord(input); + } + + // assert that data read and deserialized matches the data originally generated + GPDataGenerator.assertDataSet(rows); + + } +} + diff --git a/server/pxf-service/src/test/resources/data/README.md b/server/pxf-service/src/test/resources/data/README.md new file mode 100644 index 0000000000..35237c38bd --- /dev/null +++ b/server/pxf-service/src/test/resources/data/README.md @@ -0,0 +1,4 @@ +The files in this directory are auto-generated. Steps to reproduce: + +1. Run `GPDataGenerator` from your IDE, it will generate `sample_data.sql` in this directory. +2. 
Run `psql -f sample_data.sql pxfautomation` in this directory that will generate `sample_data.*` data files \ No newline at end of file diff --git a/server/pxf-service/src/test/resources/data/sample_data.csv b/server/pxf-service/src/test/resources/data/sample_data.csv new file mode 100644 index 0000000000..299d62417e --- /dev/null +++ b/server/pxf-service/src/test/resources/data/sample_data.csv @@ -0,0 +1,27 @@ +0,"row-""|00|""",0,1000000,5555500000,0.0001,3.14159265358979,12345678900000.000000,f,2010-01-01,10:11:00,2013-07-13 21:00:05.000456,2013-07-13 21:00:05.000123-07,abc, def ,b-0,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +1,,1,1000001,5555500001,1.0001,3.14159265358979,12345678900000.000001,t,2010-01-02,10:11:01,2013-07-13 21:00:05.001456,2013-07-13 21:00:05.001123-07,abc, def ,b-1,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +2,"row-""|02|""",,1000002,5555500002,2.0001,3.14159265358979,12345678900000.000002,f,2010-01-03,10:11:02,2013-07-13 21:00:05.002456,2013-07-13 21:00:05.002123-07,abc, def ,b-2,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +3,"row-""|03|""",3,,5555500003,3.0001,3.14159265358979,12345678900000.000003,t,2010-01-04,10:11:03,2013-07-13 21:00:05.003456,2013-07-13 21:00:05.003123-07,abc, def ,b-3,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" 
+4,"row-""|04|""",4,1000004,,4.0001,3.14159265358979,12345678900000.000004,f,2010-01-05,10:11:04,2013-07-13 21:00:05.004456,2013-07-13 21:00:05.004123-07,abc, def ,b-4,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +5,"row-""|05|""",5,1000005,5555500005,,3.14159265358979,12345678900000.000005,t,2010-01-06,10:11:05,2013-07-13 21:00:05.005456,2013-07-13 21:00:05.005123-07,abc, def ,b-5,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +6,"row-""|06|""",6,1000006,5555500006,6.0001,,12345678900000.000006,f,2010-01-07,10:11:06,2013-07-13 21:00:05.006456,2013-07-13 21:00:05.006123-07,abc, def ,b-6,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +7,"row-""|07|""",7,1000007,5555500007,7.0001,3.14159265358979,,t,2010-01-08,10:11:07,2013-07-13 21:00:05.007456,2013-07-13 21:00:05.007123-07,abc, def ,b-7,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +8,"row-""|08|""",8,1000008,5555500008,8.0001,3.14159265358979,12345678900000.000008,,2010-01-09,10:11:08,2013-07-13 21:00:05.008456,2013-07-13 21:00:05.008123-07,abc, def ,b-8,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" 
+9,"row-""|09|""",9,1000009,5555500009,9.0001,3.14159265358979,12345678900000.000009,t,,10:11:09,2013-07-13 21:00:05.009456,2013-07-13 21:00:05.009123-07,abc, def ,b-9,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +10,"row-""|10|""",10,1000010,5555500010,10.0001,3.14159265358979,12345678900000.0000010,f,2010-01-11,,2013-07-13 21:00:05.010456,2013-07-13 21:00:05.010123-07,abc, def ,b-10,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +11,"row-""|11|""",11,1000011,5555500011,11.0001,3.14159265358979,12345678900000.0000011,t,2010-01-12,10:11:11,,2013-07-13 21:00:05.011123-07,abc, def ,b-11,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +12,"row-""|12|""",12,1000012,5555500012,12.0001,3.14159265358979,12345678900000.0000012,f,2010-01-13,10:11:12,2013-07-13 21:00:05.012456,,abc, def ,b-12,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +13,"row-""|13|""",13,1000013,5555500013,13.0001,3.14159265358979,12345678900000.0000013,t,2010-01-14,10:11:13,2013-07-13 21:00:05.013456,2013-07-13 21:00:05.013123-07,, def ,b-13,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" 
+14,"row-""|14|""",14,1000014,5555500014,14.0001,3.14159265358979,12345678900000.0000014,f,2010-01-15,10:11:14,2013-07-13 21:00:05.014456,2013-07-13 21:00:05.014123-07,abc,,b-14,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +15,"row-""|15|""",15,1000015,5555500015,15.0001,3.14159265358979,12345678900000.0000015,t,2010-01-16,10:11:15,2013-07-13 21:00:05.015456,2013-07-13 21:00:05.015123-07,abc, def ,,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +16,"row-""|16|""",16,1000016,5555500016,16.0001,3.14159265358979,12345678900000.0000016,f,2010-01-17,10:11:16,2013-07-13 21:00:05.016456,2013-07-13 21:00:05.016123-07,abc, def ,b-16,,"{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +17,"row-""|17|""",17,1000017,5555500017,17.0001,3.14159265358979,12345678900000.0000017,t,2010-01-18,10:11:17,2013-07-13 21:00:05.017456,2013-07-13 21:00:05.017123-07,abc, def ,b-17,"{t,f}",,"{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +18,"row-""|18|""",18,1000018,5555500018,18.0001,3.14159265358979,12345678900000.0000018,f,2010-01-19,10:11:18,2013-07-13 21:00:05.018456,2013-07-13 21:00:05.018123-07,abc, def ,b-18,"{t,f}","{1,2,3}",,"{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" 
+19,"row-""|19|""",19,1000019,5555500019,19.0001,3.14159265358979,12345678900000.0000019,t,2010-01-20,10:11:19,2013-07-13 21:00:05.019456,2013-07-13 21:00:05.019123-07,abc, def ,b-19,"{t,f}","{1,2,3}","{1000000,2000000}",,"{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +20,"row-""|20|""",20,1000020,5555500020,20.0001,3.14159265358979,12345678900000.0000020,f,2010-01-21,10:11:20,2013-07-13 21:00:05.020456,2013-07-13 21:00:05.020123-07,abc, def ,b-20,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}",,"{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +21,"row-""|21|""",21,1000021,5555500021,21.0001,3.14159265358979,12345678900000.0000021,t,2010-01-22,10:11:21,2013-07-13 21:00:05.021456,2013-07-13 21:00:05.021123-07,abc, def ,b-21,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}",,"{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +22,"row-""|22|""",22,1000022,5555500022,22.0001,3.14159265358979,12345678900000.0000022,f,2010-01-23,10:11:22,2013-07-13 21:00:05.022456,2013-07-13 21:00:05.022123-07,abc, def ,b-22,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}",,"{hello,world}","{11,12}","{""abc "",defij}","{abcde,fijkl}" +23,"row-""|23|""",23,1000023,5555500023,23.0001,3.14159265358979,12345678900000.0000023,t,2010-01-24,10:11:23,2013-07-13 21:00:05.023456,2013-07-13 21:00:05.023123-07,abc, def ,b-23,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}",,"{11,12}","{""abc "",defij}","{abcde,fijkl}" 
+24,"row-""|24|""",24,1000024,5555500024,24.0001,3.14159265358979,12345678900000.0000024,f,2010-01-25,10:11:24,2013-07-13 21:00:05.024456,2013-07-13 21:00:05.024123-07,abc, def ,b-24,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}",,"{""abc "",defij}","{abcde,fijkl}" +25,"row-""|25|""",25,1000025,5555500025,25.0001,3.14159265358979,12345678900000.0000025,t,2010-01-26,10:11:25,2013-07-13 21:00:05.025456,2013-07-13 21:00:05.025123-07,abc, def ,b-25,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}",,"{abcde,fijkl}" +26,"row-""|26|""",26,1000026,5555500026,26.0001,3.14159265358979,12345678900000.0000026,f,2010-01-27,10:11:26,2013-07-13 21:00:05.026456,2013-07-13 21:00:05.026123-07,abc, def ,b-26,"{t,f}","{1,2,3}","{1000000,2000000}","{7777700000,7777700001}","{123.456,789.012}","{123.456789,789.123456}","{12345678900000.000001,12345678900000.000001}","{hello,world}","{11,12}","{""abc "",defij}", diff --git a/server/pxf-service/src/test/resources/data/sample_data.sql b/server/pxf-service/src/test/resources/data/sample_data.sql new file mode 100644 index 0000000000..dbf6a0c2a0 --- /dev/null +++ b/server/pxf-service/src/test/resources/data/sample_data.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS sample_data; +CREATE TABLE sample_data (id integer, name text, sml smallint, integ integer, bg bigint, r real, dp double precision, dec numeric, bool boolean, cdate date, ctime time, tm timestamp without time zone, tmz timestamp with time zone, c1 character(3), vc1 character varying(5), bin bytea, bool_arr boolean[], int2_arr smallint[], int_arr int[], int8_arr bigint[], float_arr real[], float8_arr float[], numeric_arr numeric[], text_arr text[], bytea_arr bytea[], char_arr bpchar(5)[], varchar_arr varchar(5)[]) DISTRIBUTED BY 
(id); +INSERT INTO sample_data VALUES (0, 'row-"|00|"', 0, 1000000, 5555500000, 1.0E-4, 3.14159265358979, '12345678900000.000000', false, '2010-01-01', '10:11:00', '2013-07-13 21:00:05.000456', '2013-07-13 21:00:05.000123-07', 'abc', ' def ', '\x622d30', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (1, NULL, 1, 1000001, 5555500001, 1.0001, 3.14159265358979, '12345678900000.000001', true, '2010-01-02', '10:11:01', '2013-07-13 21:00:05.001456', '2013-07-13 21:00:05.001123-07', 'abc', ' def ', '\x622d31', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (2, 'row-"|02|"', NULL, 1000002, 5555500002, 2.0001, 3.14159265358979, '12345678900000.000002', false, '2010-01-03', '10:11:02', '2013-07-13 21:00:05.002456', '2013-07-13 21:00:05.002123-07', 'abc', ' def ', '\x622d32', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (3, 'row-"|03|"', 3, NULL, 5555500003, 3.0001, 3.14159265358979, '12345678900000.000003', true, '2010-01-04', '10:11:03', '2013-07-13 21:00:05.003456', '2013-07-13 21:00:05.003123-07', 'abc', ' def ', '\x622d33', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (4, 'row-"|04|"', 4, 1000004, NULL, 4.0001, 3.14159265358979, 
'12345678900000.000004', false, '2010-01-05', '10:11:04', '2013-07-13 21:00:05.004456', '2013-07-13 21:00:05.004123-07', 'abc', ' def ', '\x622d34', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (5, 'row-"|05|"', 5, 1000005, 5555500005, NULL, 3.14159265358979, '12345678900000.000005', true, '2010-01-06', '10:11:05', '2013-07-13 21:00:05.005456', '2013-07-13 21:00:05.005123-07', 'abc', ' def ', '\x622d35', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (6, 'row-"|06|"', 6, 1000006, 5555500006, 6.0001, NULL, '12345678900000.000006', false, '2010-01-07', '10:11:06', '2013-07-13 21:00:05.006456', '2013-07-13 21:00:05.006123-07', 'abc', ' def ', '\x622d36', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (7, 'row-"|07|"', 7, 1000007, 5555500007, 7.0001, 3.14159265358979, NULL, true, '2010-01-08', '10:11:07', '2013-07-13 21:00:05.007456', '2013-07-13 21:00:05.007123-07', 'abc', ' def ', '\x622d37', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (8, 'row-"|08|"', 8, 1000008, 5555500008, 8.0001, 3.14159265358979, '12345678900000.000008', NULL, '2010-01-09', '10:11:08', '2013-07-13 21:00:05.008456', '2013-07-13 21:00:05.008123-07', 'abc', ' def 
', '\x622d38', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (9, 'row-"|09|"', 9, 1000009, 5555500009, 9.0001, 3.14159265358979, '12345678900000.000009', true, NULL, '10:11:09', '2013-07-13 21:00:05.009456', '2013-07-13 21:00:05.009123-07', 'abc', ' def ', '\x622d39', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (10, 'row-"|10|"', 10, 1000010, 5555500010, 10.0001, 3.14159265358979, '12345678900000.0000010', false, '2010-01-11', NULL, '2013-07-13 21:00:05.010456', '2013-07-13 21:00:05.010123-07', 'abc', ' def ', '\x622d3130', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (11, 'row-"|11|"', 11, 1000011, 5555500011, 11.0001, 3.14159265358979, '12345678900000.0000011', true, '2010-01-12', '10:11:11', NULL, '2013-07-13 21:00:05.011123-07', 'abc', ' def ', '\x622d3131', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (12, 'row-"|12|"', 12, 1000012, 5555500012, 12.0001, 3.14159265358979, '12345678900000.0000012', false, '2010-01-13', '10:11:12', '2013-07-13 21:00:05.012456', NULL, 'abc', ' def ', '\x622d3132', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', 
'{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (13, 'row-"|13|"', 13, 1000013, 5555500013, 13.0001, 3.14159265358979, '12345678900000.0000013', true, '2010-01-14', '10:11:13', '2013-07-13 21:00:05.013456', '2013-07-13 21:00:05.013123-07', NULL, ' def ', '\x622d3133', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (14, 'row-"|14|"', 14, 1000014, 5555500014, 14.0001, 3.14159265358979, '12345678900000.0000014', false, '2010-01-15', '10:11:14', '2013-07-13 21:00:05.014456', '2013-07-13 21:00:05.014123-07', 'abc', NULL, '\x622d3134', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (15, 'row-"|15|"', 15, 1000015, 5555500015, 15.0001, 3.14159265358979, '12345678900000.0000015', true, '2010-01-16', '10:11:15', '2013-07-13 21:00:05.015456', '2013-07-13 21:00:05.015123-07', 'abc', ' def ', NULL, '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (16, 'row-"|16|"', 16, 1000016, 5555500016, 16.0001, 3.14159265358979, '12345678900000.0000016', false, '2010-01-17', '10:11:16', '2013-07-13 21:00:05.016456', '2013-07-13 21:00:05.016123-07', 'abc', ' def ', '\x622d3136', NULL, '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', 
'{abcde,fijkl}'); +INSERT INTO sample_data VALUES (17, 'row-"|17|"', 17, 1000017, 5555500017, 17.0001, 3.14159265358979, '12345678900000.0000017', true, '2010-01-18', '10:11:17', '2013-07-13 21:00:05.017456', '2013-07-13 21:00:05.017123-07', 'abc', ' def ', '\x622d3137', '{t,f}', NULL, '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (18, 'row-"|18|"', 18, 1000018, 5555500018, 18.0001, 3.14159265358979, '12345678900000.0000018', false, '2010-01-19', '10:11:18', '2013-07-13 21:00:05.018456', '2013-07-13 21:00:05.018123-07', 'abc', ' def ', '\x622d3138', '{t,f}', '{1,2,3}', NULL, '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (19, 'row-"|19|"', 19, 1000019, 5555500019, 19.0001, 3.14159265358979, '12345678900000.0000019', true, '2010-01-20', '10:11:19', '2013-07-13 21:00:05.019456', '2013-07-13 21:00:05.019123-07', 'abc', ' def ', '\x622d3139', '{t,f}', '{1,2,3}', '{1000000,2000000}', NULL, '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (20, 'row-"|20|"', 20, 1000020, 5555500020, 20.0001, 3.14159265358979, '12345678900000.0000020', false, '2010-01-21', '10:11:20', '2013-07-13 21:00:05.020456', '2013-07-13 21:00:05.020123-07', 'abc', ' def ', '\x622d3230', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', NULL, '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (21, 'row-"|21|"', 21, 1000021, 5555500021, 21.0001, 3.14159265358979, 
'12345678900000.0000021', true, '2010-01-22', '10:11:21', '2013-07-13 21:00:05.021456', '2013-07-13 21:00:05.021123-07', 'abc', ' def ', '\x622d3231', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', NULL, '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (22, 'row-"|22|"', 22, 1000022, 5555500022, 22.0001, 3.14159265358979, '12345678900000.0000022', false, '2010-01-23', '10:11:22', '2013-07-13 21:00:05.022456', '2013-07-13 21:00:05.022123-07', 'abc', ' def ', '\x622d3232', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', NULL, '{hello,world}', '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (23, 'row-"|23|"', 23, 1000023, 5555500023, 23.0001, 3.14159265358979, '12345678900000.0000023', true, '2010-01-24', '10:11:23', '2013-07-13 21:00:05.023456', '2013-07-13 21:00:05.023123-07', 'abc', ' def ', '\x622d3233', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', NULL, '{11,12}', '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (24, 'row-"|24|"', 24, 1000024, 5555500024, 24.0001, 3.14159265358979, '12345678900000.0000024', false, '2010-01-25', '10:11:24', '2013-07-13 21:00:05.024456', '2013-07-13 21:00:05.024123-07', 'abc', ' def ', '\x622d3234', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', NULL, '{abc,defij}', '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (25, 'row-"|25|"', 25, 1000025, 5555500025, 25.0001, 3.14159265358979, '12345678900000.0000025', true, '2010-01-26', '10:11:25', '2013-07-13 21:00:05.025456', '2013-07-13 21:00:05.025123-07', 'abc', ' def ', '\x622d3235', 
'{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', NULL, '{abcde,fijkl}'); +INSERT INTO sample_data VALUES (26, 'row-"|26|"', 26, 1000026, 5555500026, 26.0001, 3.14159265358979, '12345678900000.0000026', false, '2010-01-27', '10:11:26', '2013-07-13 21:00:05.026456', '2013-07-13 21:00:05.026123-07', 'abc', ' def ', '\x622d3236', '{t,f}', '{1,2,3}', '{1000000,2000000}', '{7777700000,7777700001}', '{123.456,789.012}', '{123.456789,789.123456}', '{12345678900000.000001,12345678900000.000001}', '{hello,world}', '{11,12}', '{abc,defij}', NULL); +\set data_dir `echo $HOME/workspace/pxf/server/pxf-service/src/test/resources/data/` +\set txt_file :data_dir 'sample_data.txt' +\set csv_file :data_dir 'sample_data.csv' +\set pipe_csv_file :data_dir 'sample_data_pipe.csv' +COPY (SELECT * FROM sample_data ORDER BY id) TO :'txt_file'; +COPY (SELECT * FROM sample_data ORDER BY id) TO :'csv_file' CSV; +COPY (SELECT * FROM sample_data ORDER BY id) TO :'pipe_csv_file' CSV DELIMITER '|'; diff --git a/server/pxf-service/src/test/resources/data/sample_data.txt b/server/pxf-service/src/test/resources/data/sample_data.txt new file mode 100644 index 0000000000..ff2c31d3d3 --- /dev/null +++ b/server/pxf-service/src/test/resources/data/sample_data.txt @@ -0,0 +1,27 @@ +0 row-"|00|" 0 1000000 5555500000 0.0001 3.14159265358979 12345678900000.000000 f 2010-01-01 10:11:00 2013-07-13 21:00:05.000456 2013-07-13 21:00:05.000123-07 abc def b-0 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +1 \N 1 1000001 5555500001 1.0001 3.14159265358979 12345678900000.000001 t 2010-01-02 10:11:01 2013-07-13 21:00:05.001456 2013-07-13 21:00:05.001123-07 abc def b-1 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} 
{123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +2 row-"|02|" \N 1000002 5555500002 2.0001 3.14159265358979 12345678900000.000002 f 2010-01-03 10:11:02 2013-07-13 21:00:05.002456 2013-07-13 21:00:05.002123-07 abc def b-2 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +3 row-"|03|" 3 \N 5555500003 3.0001 3.14159265358979 12345678900000.000003 t 2010-01-04 10:11:03 2013-07-13 21:00:05.003456 2013-07-13 21:00:05.003123-07 abc def b-3 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +4 row-"|04|" 4 1000004 \N 4.0001 3.14159265358979 12345678900000.000004 f 2010-01-05 10:11:04 2013-07-13 21:00:05.004456 2013-07-13 21:00:05.004123-07 abc def b-4 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +5 row-"|05|" 5 1000005 5555500005 \N 3.14159265358979 12345678900000.000005 t 2010-01-06 10:11:05 2013-07-13 21:00:05.005456 2013-07-13 21:00:05.005123-07 abc def b-5 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +6 row-"|06|" 6 1000006 5555500006 6.0001 \N 12345678900000.000006 f 2010-01-07 10:11:06 2013-07-13 21:00:05.006456 2013-07-13 21:00:05.006123-07 abc def b-6 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +7 row-"|07|" 7 1000007 5555500007 7.0001 
3.14159265358979 \N t 2010-01-08 10:11:07 2013-07-13 21:00:05.007456 2013-07-13 21:00:05.007123-07 abc def b-7 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +8 row-"|08|" 8 1000008 5555500008 8.0001 3.14159265358979 12345678900000.000008 \N 2010-01-09 10:11:08 2013-07-13 21:00:05.008456 2013-07-13 21:00:05.008123-07 abc def b-8 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +9 row-"|09|" 9 1000009 5555500009 9.0001 3.14159265358979 12345678900000.000009 t \N 10:11:09 2013-07-13 21:00:05.009456 2013-07-13 21:00:05.009123-07 abc def b-9 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +10 row-"|10|" 10 1000010 5555500010 10.0001 3.14159265358979 12345678900000.0000010 f 2010-01-11 \N 2013-07-13 21:00:05.010456 2013-07-13 21:00:05.010123-07 abc def b-10 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +11 row-"|11|" 11 1000011 5555500011 11.0001 3.14159265358979 12345678900000.0000011 t 2010-01-12 10:11:11 \N 2013-07-13 21:00:05.011123-07 abc def b-11 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +12 row-"|12|" 12 1000012 5555500012 12.0001 3.14159265358979 12345678900000.0000012 f 2010-01-13 10:11:12 2013-07-13 21:00:05.012456 \N abc def b-12 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} 
{12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +13 row-"|13|" 13 1000013 5555500013 13.0001 3.14159265358979 12345678900000.0000013 t 2010-01-14 10:11:13 2013-07-13 21:00:05.013456 2013-07-13 21:00:05.013123-07 \N def b-13 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +14 row-"|14|" 14 1000014 5555500014 14.0001 3.14159265358979 12345678900000.0000014 f 2010-01-15 10:11:14 2013-07-13 21:00:05.014456 2013-07-13 21:00:05.014123-07 abc \N b-14 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +15 row-"|15|" 15 1000015 5555500015 15.0001 3.14159265358979 12345678900000.0000015 t 2010-01-16 10:11:15 2013-07-13 21:00:05.015456 2013-07-13 21:00:05.015123-07 abc def \N {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +16 row-"|16|" 16 1000016 5555500016 16.0001 3.14159265358979 12345678900000.0000016 f 2010-01-17 10:11:16 2013-07-13 21:00:05.016456 2013-07-13 21:00:05.016123-07 abc def b-16 \N {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +17 row-"|17|" 17 1000017 5555500017 17.0001 3.14159265358979 12345678900000.0000017 t 2010-01-18 10:11:17 2013-07-13 21:00:05.017456 2013-07-13 21:00:05.017123-07 abc def b-17 {t,f} \N {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +18 row-"|18|" 18 1000018 5555500018 18.0001 
3.14159265358979 12345678900000.0000018 f 2010-01-19 10:11:18 2013-07-13 21:00:05.018456 2013-07-13 21:00:05.018123-07 abc def b-18 {t,f} {1,2,3} \N {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +19 row-"|19|" 19 1000019 5555500019 19.0001 3.14159265358979 12345678900000.0000019 t 2010-01-20 10:11:19 2013-07-13 21:00:05.019456 2013-07-13 21:00:05.019123-07 abc def b-19 {t,f} {1,2,3} {1000000,2000000} \N {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +20 row-"|20|" 20 1000020 5555500020 20.0001 3.14159265358979 12345678900000.0000020 f 2010-01-21 10:11:20 2013-07-13 21:00:05.020456 2013-07-13 21:00:05.020123-07 abc def b-20 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} \N {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +21 row-"|21|" 21 1000021 5555500021 21.0001 3.14159265358979 12345678900000.0000021 t 2010-01-22 10:11:21 2013-07-13 21:00:05.021456 2013-07-13 21:00:05.021123-07 abc def b-21 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} \N {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +22 row-"|22|" 22 1000022 5555500022 22.0001 3.14159265358979 12345678900000.0000022 f 2010-01-23 10:11:22 2013-07-13 21:00:05.022456 2013-07-13 21:00:05.022123-07 abc def b-22 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} \N {hello,world} {11,12} {"abc ",defij} {abcde,fijkl} +23 row-"|23|" 23 1000023 5555500023 23.0001 3.14159265358979 12345678900000.0000023 t 2010-01-24 10:11:23 2013-07-13 21:00:05.023456 2013-07-13 21:00:05.023123-07 abc def b-23 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} 
{12345678900000.000001,12345678900000.000001} \N {11,12} {"abc ",defij} {abcde,fijkl} +24 row-"|24|" 24 1000024 5555500024 24.0001 3.14159265358979 12345678900000.0000024 f 2010-01-25 10:11:24 2013-07-13 21:00:05.024456 2013-07-13 21:00:05.024123-07 abc def b-24 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} \N {"abc ",defij} {abcde,fijkl} +25 row-"|25|" 25 1000025 5555500025 25.0001 3.14159265358979 12345678900000.0000025 t 2010-01-26 10:11:25 2013-07-13 21:00:05.025456 2013-07-13 21:00:05.025123-07 abc def b-25 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} \N {abcde,fijkl} +26 row-"|26|" 26 1000026 5555500026 26.0001 3.14159265358979 12345678900000.0000026 f 2010-01-27 10:11:26 2013-07-13 21:00:05.026456 2013-07-13 21:00:05.026123-07 abc def b-26 {t,f} {1,2,3} {1000000,2000000} {7777700000,7777700001} {123.456,789.012} {123.456789,789.123456} {12345678900000.000001,12345678900000.000001} {hello,world} {11,12} {"abc ",defij} \N diff --git a/server/pxf-service/src/test/resources/data/sample_data_pipe.csv b/server/pxf-service/src/test/resources/data/sample_data_pipe.csv new file mode 100644 index 0000000000..90aa21d611 --- /dev/null +++ b/server/pxf-service/src/test/resources/data/sample_data_pipe.csv @@ -0,0 +1,27 @@ +0|"row-""|00|"""|0|1000000|5555500000|0.0001|3.14159265358979|12345678900000.000000|f|2010-01-01|10:11:00|2013-07-13 21:00:05.000456|2013-07-13 21:00:05.000123-07|abc| def |b-0|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +1||1|1000001|5555500001|1.0001|3.14159265358979|12345678900000.000001|t|2010-01-02|10:11:01|2013-07-13 21:00:05.001456|2013-07-13 21:00:05.001123-07|abc| 
def |b-1|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +2|"row-""|02|"""||1000002|5555500002|2.0001|3.14159265358979|12345678900000.000002|f|2010-01-03|10:11:02|2013-07-13 21:00:05.002456|2013-07-13 21:00:05.002123-07|abc| def |b-2|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +3|"row-""|03|"""|3||5555500003|3.0001|3.14159265358979|12345678900000.000003|t|2010-01-04|10:11:03|2013-07-13 21:00:05.003456|2013-07-13 21:00:05.003123-07|abc| def |b-3|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +4|"row-""|04|"""|4|1000004||4.0001|3.14159265358979|12345678900000.000004|f|2010-01-05|10:11:04|2013-07-13 21:00:05.004456|2013-07-13 21:00:05.004123-07|abc| def |b-4|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +5|"row-""|05|"""|5|1000005|5555500005||3.14159265358979|12345678900000.000005|t|2010-01-06|10:11:05|2013-07-13 21:00:05.005456|2013-07-13 21:00:05.005123-07|abc| def |b-5|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +6|"row-""|06|"""|6|1000006|5555500006|6.0001||12345678900000.000006|f|2010-01-07|10:11:06|2013-07-13 21:00:05.006456|2013-07-13 21:00:05.006123-07|abc| def 
|b-6|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +7|"row-""|07|"""|7|1000007|5555500007|7.0001|3.14159265358979||t|2010-01-08|10:11:07|2013-07-13 21:00:05.007456|2013-07-13 21:00:05.007123-07|abc| def |b-7|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +8|"row-""|08|"""|8|1000008|5555500008|8.0001|3.14159265358979|12345678900000.000008||2010-01-09|10:11:08|2013-07-13 21:00:05.008456|2013-07-13 21:00:05.008123-07|abc| def |b-8|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +9|"row-""|09|"""|9|1000009|5555500009|9.0001|3.14159265358979|12345678900000.000009|t||10:11:09|2013-07-13 21:00:05.009456|2013-07-13 21:00:05.009123-07|abc| def |b-9|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +10|"row-""|10|"""|10|1000010|5555500010|10.0001|3.14159265358979|12345678900000.0000010|f|2010-01-11||2013-07-13 21:00:05.010456|2013-07-13 21:00:05.010123-07|abc| def |b-10|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +11|"row-""|11|"""|11|1000011|5555500011|11.0001|3.14159265358979|12345678900000.0000011|t|2010-01-12|10:11:11||2013-07-13 21:00:05.011123-07|abc| def 
|b-11|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +12|"row-""|12|"""|12|1000012|5555500012|12.0001|3.14159265358979|12345678900000.0000012|f|2010-01-13|10:11:12|2013-07-13 21:00:05.012456||abc| def |b-12|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +13|"row-""|13|"""|13|1000013|5555500013|13.0001|3.14159265358979|12345678900000.0000013|t|2010-01-14|10:11:13|2013-07-13 21:00:05.013456|2013-07-13 21:00:05.013123-07|| def |b-13|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +14|"row-""|14|"""|14|1000014|5555500014|14.0001|3.14159265358979|12345678900000.0000014|f|2010-01-15|10:11:14|2013-07-13 21:00:05.014456|2013-07-13 21:00:05.014123-07|abc||b-14|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +15|"row-""|15|"""|15|1000015|5555500015|15.0001|3.14159265358979|12345678900000.0000015|t|2010-01-16|10:11:15|2013-07-13 21:00:05.015456|2013-07-13 21:00:05.015123-07|abc| def ||{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +16|"row-""|16|"""|16|1000016|5555500016|16.0001|3.14159265358979|12345678900000.0000016|f|2010-01-17|10:11:16|2013-07-13 21:00:05.016456|2013-07-13 21:00:05.016123-07|abc| def 
|b-16||{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +17|"row-""|17|"""|17|1000017|5555500017|17.0001|3.14159265358979|12345678900000.0000017|t|2010-01-18|10:11:17|2013-07-13 21:00:05.017456|2013-07-13 21:00:05.017123-07|abc| def |b-17|{t,f}||{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +18|"row-""|18|"""|18|1000018|5555500018|18.0001|3.14159265358979|12345678900000.0000018|f|2010-01-19|10:11:18|2013-07-13 21:00:05.018456|2013-07-13 21:00:05.018123-07|abc| def |b-18|{t,f}|{1,2,3}||{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +19|"row-""|19|"""|19|1000019|5555500019|19.0001|3.14159265358979|12345678900000.0000019|t|2010-01-20|10:11:19|2013-07-13 21:00:05.019456|2013-07-13 21:00:05.019123-07|abc| def |b-19|{t,f}|{1,2,3}|{1000000,2000000}||{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +20|"row-""|20|"""|20|1000020|5555500020|20.0001|3.14159265358979|12345678900000.0000020|f|2010-01-21|10:11:20|2013-07-13 21:00:05.020456|2013-07-13 21:00:05.020123-07|abc| def |b-20|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}||{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +21|"row-""|21|"""|21|1000021|5555500021|21.0001|3.14159265358979|12345678900000.0000021|t|2010-01-22|10:11:21|2013-07-13 21:00:05.021456|2013-07-13 21:00:05.021123-07|abc| def |b-21|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}||{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc 
"",defij}"|{abcde,fijkl} +22|"row-""|22|"""|22|1000022|5555500022|22.0001|3.14159265358979|12345678900000.0000022|f|2010-01-23|10:11:22|2013-07-13 21:00:05.022456|2013-07-13 21:00:05.022123-07|abc| def |b-22|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}||{hello,world}|{11,12}|"{""abc "",defij}"|{abcde,fijkl} +23|"row-""|23|"""|23|1000023|5555500023|23.0001|3.14159265358979|12345678900000.0000023|t|2010-01-24|10:11:23|2013-07-13 21:00:05.023456|2013-07-13 21:00:05.023123-07|abc| def |b-23|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}||{11,12}|"{""abc "",defij}"|{abcde,fijkl} +24|"row-""|24|"""|24|1000024|5555500024|24.0001|3.14159265358979|12345678900000.0000024|f|2010-01-25|10:11:24|2013-07-13 21:00:05.024456|2013-07-13 21:00:05.024123-07|abc| def |b-24|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}||"{""abc "",defij}"|{abcde,fijkl} +25|"row-""|25|"""|25|1000025|5555500025|25.0001|3.14159265358979|12345678900000.0000025|t|2010-01-26|10:11:25|2013-07-13 21:00:05.025456|2013-07-13 21:00:05.025123-07|abc| def |b-25|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}||{abcde,fijkl} +26|"row-""|26|"""|26|1000026|5555500026|26.0001|3.14159265358979|12345678900000.0000026|f|2010-01-27|10:11:26|2013-07-13 21:00:05.026456|2013-07-13 21:00:05.026123-07|abc| def |b-26|{t,f}|{1,2,3}|{1000000,2000000}|{7777700000,7777700001}|{123.456,789.012}|{123.456789,789.123456}|{12345678900000.000001,12345678900000.000001}|{hello,world}|{11,12}|"{""abc "",defij}"| From 12addb9cea62644061852c766be889b1e20b66be Mon Sep 17 00:00:00 2001 From: Alexander Denissov Date: Tue, 6 Jun 2023 10:38:05 -0700 Subject: [PATCH 11/35] 
Restrict PXF to listen to local requests only (#976) * Document option to restrict PXF to listen to local requests only * added newline * set server.address to localhost by default --- .../pxf-service/src/main/resources/application.properties | 2 ++ .../src/templates/conf/pxf-application.properties | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/server/pxf-service/src/main/resources/application.properties b/server/pxf-service/src/main/resources/application.properties index 16a1f0b2d1..2954c591b8 100644 --- a/server/pxf-service/src/main/resources/application.properties +++ b/server/pxf-service/src/main/resources/application.properties @@ -22,6 +22,8 @@ pxf.service.kerberos.constrained-delegation.credential-cache.expiration=1d spring.profiles.active=default +# server network interface and port to bind the listening socket to, use localhost by default for local traffic only +server.address=localhost server.port=${pxf.port:5888} # Whitelabel error options diff --git a/server/pxf-service/src/templates/conf/pxf-application.properties b/server/pxf-service/src/templates/conf/pxf-application.properties index 3047ba932b..622d45a13c 100644 --- a/server/pxf-service/src/templates/conf/pxf-application.properties +++ b/server/pxf-service/src/templates/conf/pxf-application.properties @@ -20,4 +20,8 @@ # Logging # To enable debug logging, uncomment and change `info` to `debug` here -# pxf.log.level=info \ No newline at end of file +# pxf.log.level=info + +# Security +# Specify IP address (or hostname) of network interface that PXF listens to, or set to 0.0.0.0 for all interfaces +# server.address=localhost From 8cc8fecda1ae0b7369f65e0305c281c603664fd6 Mon Sep 17 00:00:00 2001 From: Lisa Owen Date: Thu, 8 Jun 2023 12:31:07 -0600 Subject: [PATCH 12/35] docs - multibyte delimiter support for csv (#977) * docs - multibyte delimiter support for text/csv * clarifications/edits requested by ashuka * clarifications after chat with ashuka/alex, reorg the info a bit * 
address some comments from ashuka * remove extraneous words * re-jig the NEWLINE info, add new section * both NEWLINE formatter and LOCATION options take only CR/LF/CRLF * combine the yes/quote yes/escape rows --- docs/content/hdfs_text.html.md.erb | 145 +++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/docs/content/hdfs_text.html.md.erb b/docs/content/hdfs_text.html.md.erb index 1787733a3d..4ddd903123 100644 --- a/docs/content/hdfs_text.html.md.erb +++ b/docs/content/hdfs_text.html.md.erb @@ -365,3 +365,148 @@ Perform the following procedure to create Greenplum Database writable external t To query data from the newly-created HDFS directory named `pxfwritable_hdfs_textsimple2`, you can create a readable external Greenplum Database table as described above that references this HDFS directory and specifies `FORMAT 'CSV' (delimiter=':')`. +## About Setting the External Table Encoding + +When the external file encoding differs from the database encoding, you must set the external table `ENCODING` to match that of the data file. For example, if the database encoding is `UTF8` and the file encoding is `LATIN1`, create the external table as follows: + +``` +CREATE EXTERNAL TABLE pxf_csv_latin1(location text, month text, num_orders int, total_sales float8) + LOCATION ('pxf://data/pxf_examples/pxf_hdfs_simple.txt?PROFILE=hdfs:csv') +FORMAT 'CSV' ENCODING 'LATIN1'; +``` + +## About Reading Data Containing Multi-Byte or Multi-Character Delimiters + +You can use only a `*:csv` PXF profile to read data that contains a multi-byte delimiter or multiple delimiter characters. The syntax for creating a readable external table for such data follows: + +``` sql +CREATE EXTERNAL TABLE + ( [, ...] 
| LIKE ) +LOCATION ('pxf://?PROFILE=hdfs:csv[&SERVER=][&IGNORE_MISSING_PATH=][&SKIP_HEADER_COUNT=][&NEWLINE=]') +FORMAT 'CUSTOM' (FORMATTER='pxfdelimited_import' +The `hdfs:json` profile supports the following custom **write** options: - +| Option | Value Description | +|-------|-------------------------------------| +| ROOT=\ | When writing to a single JSON object, identifies the name of the root-level object attribute. | +| COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing json data include: `default`, `bzip2`, `gzip`, and `uncompressed`. If this option is not provided, Greenplum Database performs no data compression. | +| DISTRIBUTED BY | If you are loading data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `` on both tables. Doing so will avoid extra motion of data between segments on the load operation. | -| JSON Data Type | PXF/Greenplum Data Type | -|-------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Primitive type (integer, float, string, boolean, null) | Use the corresponding Greenplum Database built-in data type; see [Greenplum Database Data Types](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-data_types.html). | -| Array | Use `TEXT[]` to retrieve the JSON array as a Greenplum text array. | -| Object | Use dot `.` notation to specify each level of projection (nesting) to a member of a primitive or Array type. | +When you specify compression for a JSON write operation, PXF names the files that it writes `..`. For example: `jan_sales.jsonl.gz`. -### JSON Data Read Modes +## Read Examples -PXF supports two data read modes. The default mode expects one full JSON record per line. 
PXF also supports a read mode operating on JSON records that span multiple lines. +### Example Data Sets -In upcoming examples, you will use both read modes to operate on a sample data set. The schema of the sample data set defines objects with the following member names and value data types: +In upcoming read examples, you use both JSON access modes to operate on a sample data set. The schema of the sample data set defines objects with the following member names and value data types: - - "created_at" - text - - "id_str" - text - - "user" - object - - "id" - integer - - "location" - text - - "coordinates" - object (optional) - - "type" - text - - "values" - array - - [0] - integer - - [1] - integer +- "created_at" - text +- "id_str" - text +- "user" - object + - "id" - integer + - "location" - text +- "coordinates" - object (optional) -The single-JSON-record-per-line data set follows: + - "type" - text + - "values" - array + + - [0] - integer + - [1] - integer + +The data set for the single-object-per-row (JSONL) access mode follows: ``` pre {"created_at":"FriJun0722:45:03+00002013","id_str":"343136551322136576","user":{"id":395504494,"location":"NearCornwall"},"coordinates":{"type":"Point","values": [ 6, 50 ]}}, @@ -112,7 +243,7 @@ The single-JSON-record-per-line data set follows: {"created_at":"FriJun0722:45:02+00002013","id_str":"343136547136233472","user":{"id":287819058,"location":""}, "coordinates": null} ``` -This is the data set for the multi-line JSON record data set: +The data set for the single-object-per-file JSON access mode follows: ``` json { @@ -149,137 +280,88 @@ This is the data set for the multi-line JSON record data set: You will create JSON files for the sample data sets and add them to HDFS in the next section. -## Loading the Sample JSON Data to HDFS +### Loading the Sample JSON Data to HDFS -The PXF HDFS connector reads native JSON stored in HDFS. 
Before you can use Greenplum Database to query JSON format data, the data must reside in your HDFS data store. +The PXF HDFS connector can read and write native JSON stored in HDFS. -Copy and paste the single line JSON record sample data set above to a file named `singleline.json`. Similarly, copy and paste the multi-line JSON record data set to a file named `multiline.json`. +Copy and paste the object-per-row JSON sample data set above to a file named `objperrow.jsonl`. Similarly, copy and paste the single object per file JSON record data set to a file named `singleobj.json`. -**Note**: Ensure that there are **no** blank lines in your JSON files. +> **Note** Ensure that there are **no** blank lines in your JSON files. Copy the JSON data files that you just created to your HDFS data store. Create the `/data/pxf_examples` directory if you did not do so in a previous exercise. For example: ``` shell $ hdfs dfs -mkdir /data/pxf_examples -$ hdfs dfs -put singleline.json /data/pxf_examples -$ hdfs dfs -put multiline.json /data/pxf_examples -``` - -Once the data is loaded to HDFS, you can use Greenplum Database and PXF to query and analyze the JSON data. - - -## Creating the External Table - -Use the `hdfs:json` profile to read JSON-format files from HDFS. The following syntax creates a Greenplum Database readable external table that references such a file: - -``` sql -CREATE EXTERNAL TABLE - ( [, ...] | LIKE ) -LOCATION ('pxf://?PROFILE=hdfs:json[&SERVER=][&=[...]]') -FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); +$ hdfs dfs -put objperrow.jsonl /data/pxf_examples/ +$ hdfs dfs -put singleobj.json /data/pxf_examples/ ``` -The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) command are described in the table below. 
- -| Keyword | Value | -|-------|-------------------------------------| -| \ | The path to the directory or file in the HDFS data store. When the `` configuration includes a [`pxf.fs.basePath`](cfg_server.html#pxf-fs-basepath) property setting, PXF considers \ to be relative to the base path specified. Otherwise, PXF considers it to be an absolute path. \ must not specify a relative path nor include the dollar sign (`$`) character. | -| PROFILE | The `PROFILE` keyword must specify `hdfs:json`. | -| SERVER=\ | The named server configuration that PXF uses to access the data. PXF uses the `default` server if not specified. | -| \ | \s are discussed below.| -| FORMAT 'CUSTOM' | Use `FORMAT` `'CUSTOM'` with the `hdfs:json` profile. The `CUSTOM` `FORMAT` requires that you specify `(FORMATTER='pxfwritable_import')`. | - - -PXF supports single- and multi- line JSON records. When you want to read multi-line JSON records, you must provide an `IDENTIFIER` \ and value. Use this \ to identify the name of a field whose parent JSON object you want to be returned as individual tuples. - -The `hdfs:json` profile supports the following \s: - -| Option Keyword |   Syntax,  Example(s)   | Description | -|-------|--------------|-----------------------| -| IDENTIFIER | `&IDENTIFIER=`
`&IDENTIFIER=created_at`| You must include the `IDENTIFIER` keyword and \ in the `LOCATION` string only when you are accessing JSON data comprised of multi-line records. Use the \ to identify the name of the field whose parent JSON object you want to be returned as individual tuples. | -| SPLIT_BY_FILE | `&SPLIT_BY_FILE=` | Specify how PXF splits the data in \. The default value is `false`, PXF creates multiple splits for each file that it will process in parallel. When set to `true`, PXF creates and processes a single split per file. | -| IGNORE_MISSING_PATH | `&IGNORE_MISSING_PATH=` | Specify the action to take when \ is missing or invalid. The default value is `false`, PXF returns an error in this situation. When the value is `true`, PXF ignores missing path errors and returns an empty fragment. | - -
Note: When a nested object in a multi-line record JSON file includes a field with the same name as that of a parent object field and the field name is also specified as the IDENTIFIER, there is a possibility that PXF could return incorrect results. Should you need to, you can work around this edge case by compressing the JSON file, and having PXF read the compressed file.
- +Once the data is loaded to HDFS, you can use Greenplum Database and PXF to query and add to the JSON data. -## Example: Reading a JSON File with Single Line Records +### Example: Single Object Per Row (Read) -Use the following [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) SQL command to create a readable external table that references the single-line-per-record JSON data file and uses the PXF default server. +Use the following [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) SQL command to create a readable external table that references the single-object-per-row JSON data file and uses the PXF default server. ```sql -CREATE EXTERNAL TABLE singleline_json_tbl( +CREATE EXTERNAL TABLE objperrow_json_tbl( created_at TEXT, id_str TEXT, "user.id" INTEGER, "user.location" TEXT, - "coordinates.values" TEXT[] + "coordinates.values" INTEGER[] ) -LOCATION('pxf://data/pxf_examples/singleline.json?PROFILE=hdfs:json') +LOCATION('pxf://data/pxf_examples/objperrow.jsonl?PROFILE=hdfs:json') FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); ``` -Notice the use of `.` projection to access the nested fields in the `user` and `coordinates` objects. +This table reads selected fields in the JSON file. Notice the use of `.` projection to access the nested fields in the `user` and `coordinates` objects. 
-To query the JSON data in the external table: +To view the JSON data in the file, query the external table: ``` sql -SELECT * FROM singleline_json_tbl; +SELECT * FROM objperrow_json_tbl; ``` To access specific elements of the `coordinates.values` array, you can specify the array subscript number in square brackets: ```sql -SELECT "coordinates.values"[1], "coordinates.values"[2] FROM singleline_json_tbl; -``` - -To access the array elements as some type other than `TEXT`, you can either cast the whole column: - -```sql -SELECT "coordinates.values"::int[] FROM singleline_json_tbl; -``` - -or cast specific elements: - -```sql -SELECT "coordinates.values"[1]::int, "coordinates.values"[2]::float FROM singleline_json_tbl; +SELECT "coordinates.values"[1], "coordinates.values"[2] FROM objperrow_json_tbl; ``` +### Example: Single Object Per File (Read) -## Example: Reading a JSON file with Multi-Line Records - -The SQL command to create a readable external table from the multi-line-per-record JSON file is very similar to that of the single line data set above. You must additionally specify the `LOCATION` clause `IDENTIFIER` keyword and an associated value when you want to read multi-line JSON records. For example: +The SQL command to create a readable external table for a single object JSON file is very similar to that of the single object per row data set above. You must additionally specify the `LOCATION` clause `IDENTIFIER` keyword and an associated value. 
For example: ``` sql -CREATE EXTERNAL TABLE multiline_json_tbl( +CREATE EXTERNAL TABLE singleobj_json_tbl( created_at TEXT, id_str TEXT, "user.id" INTEGER, "user.location" TEXT, - "coordinates.values" TEXT[] + "coordinates.values" INTEGER[] ) -LOCATION('pxf://data/pxf_examples/multiline.json?PROFILE=hdfs:json&IDENTIFIER=created_at') +LOCATION('pxf://data/pxf_examples/singleobj.json?PROFILE=hdfs:json&IDENTIFIER=created_at') FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); ``` `created_at` identifies the member name of the first field in the JSON record `record_obj` in the sample data schema. -To query the JSON data in this external table: +To view the JSON data in the file, query the external table: ``` sql -SELECT * FROM multiline_json_tbl; +SELECT * FROM singleobj_json_tbl; ``` -## Other Methods to Read a JSON Array +### Other Methods to Read a JSON Array Starting in version 6.2.0, PXF supports reading a JSON array into a `TEXT[]` column. PXF still supports the old methods of using array element projection or a single text-type column to read a JSON array. These access methods are described here. -### Using Array Element Projection +#### Using Array Element Projection PXF supports accessing specific elements of a JSON array using the syntax `[n]` in the table definition to identify the specific element. 
```sql -CREATE EXTERNAL TABLE singleline_json_tbl_aep( +CREATE EXTERNAL TABLE objperrow_json_tbl_aep( created_at TEXT, id_str TEXT, "user.id" INTEGER, @@ -287,7 +369,7 @@ CREATE EXTERNAL TABLE singleline_json_tbl_aep( "coordinates.values[0]" INTEGER, "coordinates.values[1]" INTEGER ) -LOCATION('pxf://data/pxf_examples/singleline.json?PROFILE=hdfs:json') +LOCATION('pxf://data/pxf_examples/objperrow.jsonl?PROFILE=hdfs:json') FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); ``` @@ -296,24 +378,24 @@ FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); If your existing external table definition uses array element projection and you want to read the array into a `TEXT[]` column, you can use the `ALTER EXTERNAL TABLE` command to update the table definition. For example: ```sql -ALTER EXTERNAL TABLE singleline_json_tbl_aep DROP COLUMN "coordinates.values[0]", DROP COLUMN "coordinates.values[1]", ADD COLUMN "coordinates.values" TEXT[]; +ALTER EXTERNAL TABLE objperrow_json_tbl_aep DROP COLUMN "coordinates.values[0]", DROP COLUMN "coordinates.values[1]", ADD COLUMN "coordinates.values" TEXT[]; ``` If you choose to alter the external table definition in this manner, be sure to update any existing queries on the external table to account for the changes to column name and type. -### Specifying a Single Text-type Column +#### Specifying a Single Text-type Column PXF supports accessing all of the elements within an array as a single string containing the serialized JSON array by defining the corresponding Greenplum table column with one of the following data types: `TEXT`, `VARCHAR`, or `BPCHAR`. 
```sql -CREATE EXTERNAL TABLE singleline_json_tbl_stc( +CREATE EXTERNAL TABLE objperrow_json_tbl_stc( created_at TEXT, id_str TEXT, "user.id" INTEGER, "user.location" TEXT, "coordinates.values" TEXT ) -LOCATION('pxf://data/pxf_examples/singleline.json?PROFILE=hdfs:json') +LOCATION('pxf://data/pxf_examples/objperrow.jsonl?PROFILE=hdfs:json') FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); ``` @@ -322,7 +404,7 @@ If you retrieve the JSON array in a single text-type column and wish to convert ```sql SELECT user.id, ARRAY(SELECT json_array_elements_text(coordinates.values::json))::int[] AS coords -FROM singleline_json_tbl_stc; +FROM objperrow_json_tbl_stc; ``` **Note**: This conversion is possible only when you are using PXF with Greenplum Database 6.x; the function `json_array_elements_text()` is not available in Greenplum 5.x. @@ -330,8 +412,147 @@ FROM singleline_json_tbl_stc; If your external table definition uses a single text-type column for a JSON array and you want to read the array into a `TEXT[]` column, you can use the `ALTER EXTERNAL TABLE` command to update the table definition. For example: ```sql -ALTER EXTERNAL TABLE singleline_json_tbl_stc ALTER COLUMN "coordinates.values" TYPE TEXT[]; +ALTER EXTERNAL TABLE objperrow_json_tbl_stc ALTER COLUMN "coordinates.values" TYPE TEXT[]; ``` If you choose to alter the external table definition in this manner, be sure to update any existing queries on the external table to account for the change in column type. +## Writing JSON Data + +To write JSON data, you create a writable external table that references the name of a directory on HDFS. When you insert records into the writable external table, PXF writes the block(s) of data that you insert to one or more files in the directory that you specified. In the default case (single object per row), PXF writes the data to a `.jsonl` file. When you specify a `ROOT` attribute (single object per file), PXF writes to a `.json` file. 
+ +> **Note** When writing JSON data, PXF supports only scalar or one dimensional arrays of Greenplum data types. PXF does not support column projection when writing JSON data. + +Writable external tables can only be used for `INSERT` operations. If you want to query the data that you inserted, you must create a separate readable external table that references the HDFS directory and read from that table. + +The write examples use a data schema similar to that of the read examples. + +### Example: Single Object Per Row (Write) + +In this example, we add data to a directory named `jsopr`. + +Use the following [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) SQL command to create a writable external table that writes JSON data in single-object-per-row format and uses the PXF default server. + +```sql +CREATE WRITABLE EXTERNAL TABLE add_objperrow_json_tbl( + created_at TEXT, + id_str TEXT, + id INTEGER, + location TEXT, + coordinates INTEGER[] +) +LOCATION('pxf://data/pxf_examples/jsopr?PROFILE=hdfs:json') +FORMAT 'CUSTOM' (FORMATTER='pxfwritable_export'); +``` + +Write data to the table: + +``` sql +INSERT INTO add_objperrow_json_tbl VALUES ( 'SunJun0912:59:07+00002013', '343136551111111111', 311111111, 'FarAway', '{ 6, 50 }' ); +INSERT INTO add_objperrow_json_tbl VALUES ( 'MonJun1002:12:06+00002013', '343136557777777777', 377777777, 'NearHere', '{ 13, 93 }' ); +``` + +Read the data that you just wrote. 
Recall that you must first create a readable external table: + +``` sql +CREATE EXTERNAL TABLE jsopr_tbl( + created_at TEXT, + id_str TEXT, + id INTEGER, + location TEXT, + coordinates INTEGER[] +) +LOCATION('pxf://data/pxf_examples/jsopr?PROFILE=hdfs:json') +FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); +``` + +Query the table: + +``` sql +SELECT * FROM jsopr_tbl; + + created_at | id_str | id | location | coordinates +---------------------------+--------------------+-----------+----------+------------- + MonJun1002:12:06+00002013 | 343136557777777777 | 377777777 | NearHere | {13,93} + SunJun0912:59:07+00002013 | 343136551111111111 | 311111111 | FarAway | {6,50} +(2 rows) +``` + +View the files added to HDFS: + +``` +$ hdfs dfs -cat /data/pxf_examples/jsopr/* +{"created_at":"SunJun0912:59:07+00002013","id_str":"343136551111111111","id":311111111,"location":"FarAway","coordinates":[6,50]} +{"created_at":"MonJun1002:12:06+00002013","id_str":"343136557777777777","id":377777777,"location":"NearHere","coordinates":[13,93]} +``` + +Notice that PXF creates a flat JSON structure. + +### Example: Single Object Per File (Write) + +Use the following [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) SQL command to create a writable external table that writes JSON data in single object format and uses the PXF default server. + +You must specify the `ROOT` keyword and associated value in the `LOCATION` clause. For example: + +``` sql +CREATE WRITABLE EXTERNAL TABLE add_singleobj_json_tbl( + created_at TEXT, + id_str TEXT, + id INTEGER, + location TEXT, + coordinates INTEGER[] +) +LOCATION('pxf://data/pxf_examples/jso?PROFILE=hdfs:json&ROOT=root') +FORMAT 'CUSTOM' (FORMATTER='pxfwritable_export'); +``` + +`root` identifies the name of the root attribute of the single object. 
+
+Write data to the table:
+
+``` sql
+INSERT INTO add_singleobj_json_tbl VALUES ( 'SunJun0912:59:07+00002013', '343136551111111111', 311111111, 'FarAway', '{ 6, 50 }' );
+INSERT INTO add_singleobj_json_tbl VALUES ( 'WedJun1212:37:02+00002013', '333333333333333333', 333333333, 'NetherWorld', '{ 9, 63 }' );
+```
+
+Read the data that you just wrote. Recall that you must first create a new readable external table:
+
+``` sql
+CREATE EXTERNAL TABLE jso_tbl(
+    created_at TEXT,
+    id_str TEXT,
+    id INTEGER,
+    location TEXT,
+    coordinates INTEGER[]
+)
+LOCATION('pxf://data/pxf_examples/jso?PROFILE=hdfs:json&IDENTIFIER=created_at')
+FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import');
+```
+
+The column names that you specify in the create command must match those of the writable external table. And recall that to read a JSON file that contains a single object, you must specify the `IDENTIFIER` option.
+
+Query the table to read the data:
+
+``` sql
+SELECT * FROM jso_tbl;
+
+         created_at        |       id_str       |    id     |   location   | coo
+rdinates
+---------------------------+--------------------+-----------+--------------+----
+---------
+ WedJun1212:37:02+00002013 | 333333333333333333 | 333333333 | NetherWorld  | {9,63}
+ SunJun0912:59:07+00002013 | 343136551111111111 | 311111111 | FarAway      | {6,50}
+(2 rows)
+```
+
+View the files added to HDFS:
+
+```
+$ hdfs dfs -cat /data/pxf_examples/jso/*
+{"root":[
+{"created_at":"SunJun0912:59:07+00002013","id_str":"343136551111111111","id":311111111,"location":"FarAway","coordinates":[6,50]}
+]}
+{"root":[
+{"created_at":"WedJun1212:37:02+00002013","id_str":"333333333333333333","id":333333333,"location":"NetherWorld","coordinates":[9,63]}
+]}
+```
+
diff --git a/docs/content/hdfs_text.html.md.erb b/docs/content/hdfs_text.html.md.erb
index 4ddd903123..b5ca885b2c 100644
--- a/docs/content/hdfs_text.html.md.erb
+++ b/docs/content/hdfs_text.html.md.erb
@@ -247,12 +247,11 @@ The specific keywords and values used in the [CREATE EXTERNAL TABLE](https://doc
| 
delimiter | The delimiter character in the data. For `FORMAT` `'CSV'`, the default \ is a comma (`,`). Preface the \ with an `E` when the value is an escape sequence. Examples: `(delimiter=E'\t')`, `(delimiter ':')`. | | DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `` on both tables. Doing so will avoid extra motion of data between segments on the load operation. | -Writable external tables that you create using the `hdfs:text` or the `hdfs:csv` profiles can optionally use record or block compression. You specify the compression type and codec via custom options in the `CREATE EXTERNAL TABLE` `LOCATION` clause. The `hdfs:text` and `hdfs:csv` profiles support the following custom write options: +Writable external tables that you create using the `hdfs:text` or the `hdfs:csv` profiles can optionally use record or block compression. You specify the compression codec via a custom option in the `CREATE EXTERNAL TABLE` `LOCATION` clause. The `hdfs:text` and `hdfs:csv` profiles support the following custom write option: | Option | Value Description | |-------|-------------------------------------| | COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing text data include: `default`, `bzip2`, `gzip`, and `uncompressed`. If this option is not provided, Greenplum Database performs no data compression. | -| COMPRESSION_TYPE | The compression type to employ; supported values are `RECORD` (the default) or `BLOCK`. 
| ### Example: Writing Text Data to HDFS diff --git a/docs/content/nfs_pxf.html.md.erb b/docs/content/nfs_pxf.html.md.erb index de22c69237..4afbed0c74 100644 --- a/docs/content/nfs_pxf.html.md.erb +++ b/docs/content/nfs_pxf.html.md.erb @@ -11,7 +11,7 @@ You can use PXF to read data that resides on a network file system mounted on yo | delimited text with quoted linefeeds | file:text:multi | read | | fixed width single line text | file:fixedwidth | read, write | | Avro | file:avro | read, write | -| JSON | file:json | read | +| JSON | file:json | read, write | | ORC | file:orc | read, write | | Parquet | file:parquet | read, write | diff --git a/docs/content/objstore_fixedwidth.html.md.erb b/docs/content/objstore_fixedwidth.html.md.erb index 15988e70eb..0865c67a8c 100644 --- a/docs/content/objstore_fixedwidth.html.md.erb +++ b/docs/content/objstore_fixedwidth.html.md.erb @@ -164,12 +164,11 @@ The specific keywords and values used in the [CREATE EXTERNAL TABLE](https://doc | line_delim | The line delimiter character in the data. Preface the \ with an `E` when the value is an escape sequence. Examples: `line_delim=E'\n'`, `line_delim 'aaa'`. The default value is `'\n'`. | | DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `` on both tables. Doing so will avoid extra motion of data between segments on the load operation. | -Writable external tables that you create using the `:fixedwidth` profile can optionally use record or block compression. You specify the compression type and codec via options in the `CREATE WRITABLE EXTERNAL TABLE` `LOCATION` clause: +Writable external tables that you create using the `:fixedwidth` profile can optionally use record or block compression. 
You specify the compression codec via an option in the `CREATE WRITABLE EXTERNAL TABLE` `LOCATION` clause: | Write Option | Value Description | |-------|-------------------------------------| | COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing fixed-width text data include: `default`, `bzip2`, `gzip`, and `uncompressed`. If this option is not provided, Greenplum Database performs no data compression. | -| COMPRESSION_TYPE | The compression type to employ; supported values are `RECORD` (the default) or `BLOCK`. | ## Example: Writing Fixed-Width Text Data to S3 diff --git a/docs/content/objstore_json.html.md.erb b/docs/content/objstore_json.html.md.erb index 2a00a5ce97..60f6955b9c 100644 --- a/docs/content/objstore_json.html.md.erb +++ b/docs/content/objstore_json.html.md.erb @@ -1,5 +1,5 @@ --- -title: Reading JSON Data from an Object Store +title: Reading and Writing JSON Data in an Object Store --- -The PXF object store connectors support reading JSON-format data. This section describes how to use PXF to access JSON data in an object store, including how to create and query an external table that references a JSON file in the store. +The PXF object store connectors support reading and writing JSON-format data. This section describes how to use PXF and external tables to access and write JSON data in an object store. -**Note**: Accessing JSON-format data from an object store is very similar to accessing JSON-format data in HDFS. This topic identifies object store-specific information required to read JSON data, and links to the [PXF HDFS JSON documentation](hdfs_json.html) where appropriate for common information. +**Note**: Accessing JSON-format data from an object store is very similar to accessing JSON-format data in HDFS. This topic identifies object store-specific information required to read and write JSON data, and links to the [PXF HDFS JSON documentation](hdfs_json.html) where appropriate for common information. 
## Prerequisites @@ -33,9 +33,13 @@ Ensure that you have met the PXF Object Store [Prerequisites](access_objstore.ht Refer to [Working with JSON Data](hdfs_json.html#hdfsjson_work) in the PXF HDFS JSON documentation for a description of the JSON text-based data-interchange format. +## Data Type Mapping + +Refer to [Data Type Mapping](hdfs_json.html#datatypemap) in the PXF HDFS JSON documentation for a description of the JSON to Greenplum and Greenplum to JSON type mappings. + ## Creating the External Table -Use the `:json` profile to read JSON-format files from an object store. PXF supports the following `` profile prefixes: +Use the `:json` profile to read or write JSON-format files in an object store. PXF supports the following `` profile prefixes: | Object Store | Profile Prefix | |-------|-------------------------------------| @@ -45,13 +49,14 @@ Use the `:json` profile to read JSON-format files from an object store | MinIO | s3 | | S3 | s3 | -The following syntax creates a Greenplum Database readable external table that references a JSON-format file: +The following syntax creates a Greenplum Database external table that references JSON-format data: ``` sql -CREATE EXTERNAL TABLE +CREATE [WRITABLE] EXTERNAL TABLE ( [, ...] | LIKE ) LOCATION ('pxf://?PROFILE=:json&SERVER=[&=[...]]') -FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); +FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'|'pxfwritable_export') +[DISTRIBUTED BY ( [, ... ] ) | DISTRIBUTED RANDOMLY]; ``` The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL TABLE](https://docs.vmware.com/en/VMware-Greenplum/6/greenplum-database/ref_guide-sql_commands-CREATE_EXTERNAL_TABLE.html) command are described in the table below. @@ -61,33 +66,32 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL | \ | The path to the directory or file in the object store. 
When the `` configuration includes a [`pxf.fs.basePath`](cfg_server.html#pxf-fs-basepath) property setting, PXF considers \ to be relative to the base path specified. Otherwise, PXF considers it to be an absolute path. \ must not specify a relative path nor include the dollar sign (`$`) character. | | PROFILE=\:json | The `PROFILE` keyword must identify the specific object store. For example, `s3:json`. | | SERVER=\ | The named server configuration that PXF uses to access the data. | -| \=\ | JSON supports the custom option named `IDENTIFIER` as described in the [PXF HDFS JSON documentation](hdfs_json.html#customopts). | -| FORMAT 'CUSTOM' | Use `FORMAT` `'CUSTOM'` with the `:json` profile. The `CUSTOM` `FORMAT` requires that you specify `(FORMATTER='pxfwritable_import')`. | +| \=\ | JSON supports the custom options described in the [PXF HDFS JSON documentation](hdfs_json.html#customopts). | +| FORMAT 'CUSTOM' | Use `FORMAT` `'CUSTOM'` with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read). | If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override). -## Example +## Read Example -Refer to [Loading the Sample JSON Data to HDFS](hdfs_json.html#jsontohdfs) and [Example: Reading a JSON File with Single Line Records](hdfs_json.html#jsonexample1) in the PXF HDFS JSON documentation for a JSON example. Modifications that you must make to run the example with an object store include: +Refer to [Loading the Sample JSON Data to HDFS](hdfs_json.html#jsontohdfs) and the [Read Example](hdfs_json.html#read_example1) in the PXF HDFS JSON documentation for a JSON read example. Modifications that you must make to run the example with an object store include: - Copying the file to the object store instead of HDFS. 
For example, to copy the file to S3:

    ``` shell
-    $ aws s3 cp /tmp/singleline.json s3://BUCKET/pxf_examples/
-    $ aws s3 cp /tmp/multiline.json s3://BUCKET/pxf_examples/
+    $ aws s3 cp /tmp/objperrow.jsonl s3://BUCKET/pxf_examples/
    ```

- Using the `CREATE EXTERNAL TABLE` syntax and `LOCATION` keywords and settings described above. For example, if your server name is `s3srvcfg`:

    ``` sql
-    CREATE EXTERNAL TABLE singleline_json_s3(
+    CREATE EXTERNAL TABLE objperrow_json_s3(
       created_at TEXT,
       id_str TEXT,
       "user.id" INTEGER,
       "user.location" TEXT,
-      "coordinates.values" TEXT[]
+      "coordinates.values" INTEGER[]
    )
-    LOCATION('pxf://BUCKET/pxf_examples/singleline.json?PROFILE=s3:json&SERVER=s3srvcfg')
+    LOCATION('pxf://BUCKET/pxf_examples/objperrow.jsonl?PROFILE=s3:json&SERVER=s3srvcfg')
    FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import');
    ```

@@ -97,3 +101,34 @@ Refer to [Loading the Sample JSON Data to HDFS](hdfs_json.html#jsontohdfs) and [
-SELECT "coordinates.values"[1], "coordinates.values"[2] FROM singleline_json_s3;
+SELECT "coordinates.values"[1], "coordinates.values"[2] FROM objperrow_json_s3;
 ```

+## Write Example
+
+Refer to the [Writing JSON Data](hdfs_json.html#json_write) in the PXF HDFS JSON documentation for write examples. Modifications that you must make to run the single-object-per-row write example with an object store include:
+
+- Using the `CREATE WRITABLE EXTERNAL TABLE` syntax and `LOCATION` keywords and settings described above. For example, if your server name is `s3srvcfg`:
+
+    ``` sql
+    CREATE WRITABLE EXTERNAL TABLE add_objperrow_json_s3(
+        created_at TEXT,
+        id_str TEXT,
+        id INTEGER,
+        location TEXT,
+        coordinates INTEGER[]
+    )
+    LOCATION('pxf://BUCKET/pxf_examples/jsopr?PROFILE=s3:json&SERVER=s3srvcfg')
+    FORMAT 'CUSTOM' (FORMATTER='pxfwritable_export');
+    ```
+
+- Using the `CREATE EXTERNAL TABLE` syntax and `LOCATION` keywords and settings described above to read the data back. 
For example, if your server name is `s3srvcfg`: + + ``` sql + CREATE EXTERNAL TABLE jsopr_tbl( + created_at TEXT, + id_str TEXT, + id INTEGER, + location TEXT, + coordinates INTEGER[] + ) + LOCATION('pxf://BUCKET/pxf_examples/jsopr?PROFILE=s3:json') + FORMAT 'CUSTOM' (FORMATTER='pxfwritable_import'); + ``` diff --git a/docs/content/objstore_text.html.md.erb b/docs/content/objstore_text.html.md.erb index fb5052513a..b07c7c467d 100644 --- a/docs/content/objstore_text.html.md.erb +++ b/docs/content/objstore_text.html.md.erb @@ -277,12 +277,11 @@ The specific keywords and values used in the [CREATE EXTERNAL TABLE](https://doc | delimiter | The delimiter character in the data. For `FORMAT` `'CSV'`, the default \ is a comma (`,`). Preface the \ with an `E` when the value is an escape sequence. Examples: `(delimiter=E'\t')`, `(delimiter ':')`. | | DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `` on both tables. Doing so will avoid extra motion of data between segments on the load operation. | -Writable external tables that you create using an `:text|csv` profile can optionally use record or block compression. You specify the compression type and codec via custom options in the `CREATE EXTERNAL TABLE` `LOCATION` clause. The `:text|csv` profiles support the following custom write options: +Writable external tables that you create using an `:text|csv` profile can optionally use record or block compression. You specify the compression codec via a custom option in the `CREATE EXTERNAL TABLE` `LOCATION` clause. The `:text|csv` profiles support the following custom write options: | Option | Value Description | |-------|-------------------------------------| | COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing text data include: `default`, `bzip2`, `gzip`, and `uncompressed`. 
If this option is not provided, Greenplum Database performs no data compression. | -| COMPRESSION_TYPE | The compression type to employ; supported values are `RECORD` (the default) or `BLOCK`. | If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override). From 4f98fcf135ab3ad83f271457177c69240096d986 Mon Sep 17 00:00:00 2001 From: Lisa Owen Date: Thu, 13 Jul 2023 09:42:26 -0600 Subject: [PATCH 30/35] docs - enhanced processing for orc numeric overflow conditions (#998) * docs - enhanced processing for orc numeric overflow conditions * address most comments from yiming * revert back to original statement in upgrade steps * misc edits from ashuka (issues trying to direct commit) * reword the opening paragraph of overflow topic --- docs/content/cfg_server.html.md.erb | 3 ++- docs/content/hdfs_orc.html.md.erb | 24 ++++++++++++++++++++++++ docs/content/hdfs_parquet.html.md.erb | 14 ++++++++------ docs/content/upgrade_6.html.md.erb | 13 +++++++++++++ 4 files changed, 47 insertions(+), 7 deletions(-) diff --git a/docs/content/cfg_server.html.md.erb b/docs/content/cfg_server.html.md.erb index 23c5eed3fe..fae771b19d 100644 --- a/docs/content/cfg_server.html.md.erb +++ b/docs/content/cfg_server.html.md.erb @@ -92,7 +92,7 @@ PXF includes a template file named `pxf-site.xml` for PXF-specific configuration - Kerberos and/or user impersonation settings for server configurations - a base directory for file access -- the action of PXF when it detects an overflow condition while writing numeric Parquet data +- the action of PXF when it detects an overflow condition while writing numeric ORC or Parquet data
Note: The Kerberos and user impersonation settings in this file may apply only to Hadoop and JDBC server configurations; they do not apply to file system or object store server configurations.
@@ -116,6 +116,7 @@ You configure properties in the `pxf-site.xml` file for a PXF server when one or | pxf.fs.basePath | Identifies the base path or share point on the remote file system. This property is applicable when the server configuration is used with a profile that accesses a file. | None; this property is commented out by default. | | pxf.ppd.hive1 | Specifies whether or not predicate pushdown is enabled for queries on external tables that specify the `hive`, `hive:rc`, or `hive:orc` profiles. | True; predicate pushdown is enabled. | | pxf.sasl.connection.retries | Specifies the maximum number of times that PXF retries a SASL connection request after a refused connection returns a `GSS initiate failed` error. | 5 | +| pxf.orc.write.decimal.overflow | Specifies how PXF handles numeric data that exceeds the maximum precision of 38 and [overflows](hdfs_orc.html#overflow) when writing to an ORC file. Valid values are: round, error, or ignore | round | | pxf.parquet.write.decimal.overflow | Specifies how PXF handles numeric data that exceeds the maximum precision of 38 and [overflows](hdfs_parquet.html#overflow) when writing to a Parquet file. Valid values are: round, error, or ignore | round |
1 Should you need to, you can override this setting on a per-table basis by specifying the `&PPD=` option in the `LOCATION` clause when you create the external table. diff --git a/docs/content/hdfs_orc.html.md.erb b/docs/content/hdfs_orc.html.md.erb index a7b09ffe5a..f429ee2be4 100644 --- a/docs/content/hdfs_orc.html.md.erb +++ b/docs/content/hdfs_orc.html.md.erb @@ -301,3 +301,27 @@ In this example, you create a writable external table to write some data to the postgres=# SELECT * FROM sample_orc ORDER BY num_orders; ``` +## Understanding Overflow Conditions When Writing Numeric Data + +PXF uses the `HiveDecimal` class to write numeric ORC data. In versions prior to 6.7.0, PXF limited only the precision of a numeric type to a maximum of 38. In versions 6.7.0 and later, PXF must meet both precision and scale requirements before writing numeric ORC data. + +When you define a `NUMERIC` column in an external table without specifying a precision or scale, PXF internally maps the column to a `DECIMAL(38, 10)`. + +PXF handles the following precision overflow conditions: + +- You define a `NUMERIC` column in the external table, and the integer digit count of a value exceeds the maximum supported precision of 38. For example, `1234567890123456789012345678901234567890.12345`, which has an integer digit count of 45. +- You define a `NUMERIC()` column with a `` greater than 38. For example, `NUMERIC(55)`. +- You define a `NUMERIC` column in the external table, and the integer digit count of a value is greater than 28 (38-10). For example, `123456789012345678901234567890.12345`, which has an integer digit count of 30. + +If you define a `NUMERIC(, )` column and the integer digit count of a value is greater than ` - `, PXF returns an error. For example, you define a `NUMERIC(20,4)` column and the value is `12345678901234567.12`, which has an integer digit count of 19, which is greater than 20-4=16. 
+ +PXF can take one of three actions when it detects an overflow while writing numeric data to an ORC file: round the value (the default), return an error, or ignore the overflow. The `pxf.orc.write.decimal.overflow` property in the `pxf-site.xml` server configuration governs PXF's action in this circumstance; valid values for this property follow: + +| Value | PXF Action | +|-------|-------------------------------------| +| `round` | When PXF encounters an overflow, it attempts to round the value to meet both precision and scale requirements before writing. PXF reports an error if rounding fails. This may potentially leave an incomplete data set in the external system. `round` is the default. | +| `error` | PXF reports an error when it encounters an overflow, and the transaction fails. | +| `ignore` | PXF attempts to round the value to meet only the precision requirement and ignores validation of precision and scale; otherwise PXF writes a NULL value. (This was PXF's behavior prior to version 6.7.0.) | + +PXF logs a warning when it detects an overflow and the `pxf.orc.write.decimal.overflow` property is set to `ignore`. + diff --git a/docs/content/hdfs_parquet.html.md.erb b/docs/content/hdfs_parquet.html.md.erb index d45be2472c..26ee4817e9 100644 --- a/docs/content/hdfs_parquet.html.md.erb +++ b/docs/content/hdfs_parquet.html.md.erb @@ -251,19 +251,21 @@ PXF uses the `HiveDecimal` class to write numeric Parquet data. `HiveDecimal` li When you define a `NUMERIC` column in an external table without specifying a precision or scale, PXF internally maps the column to a `DECIMAL(38, 18)`. -A precision overflow condition can result when: +PXF handles the following precision overflow conditions: -- You define a `NUMERIC` column in the external table, and the integer digit count of a value exceeds maximum precision 38. For example, `1234567890123456789012345678901234567890.12345`, which has an integer digit count of 45. 
+- You define a `NUMERIC` column in the external table, and the integer digit count of a value exceeds the maximum supported precision of 38. For example, `1234567890123456789012345678901234567890.12345`, which has an integer digit count of 45. - You define a `NUMERIC()` column with a `` greater than 38. For example, `NUMERIC(55)`. -- You define a `NUMERIC(, )` column and the integer digit count of a value is greater than ` - `. For example, you define a `NUMERIC(20,4)` column and the value is `12345678901234567.12`, which has an integer digit count of 19, which is greater than 20-4=16. +- You define a `NUMERIC` column in the external table, and the integer digit count of a value is greater than 20 (38-18). For example, `123456789012345678901234567890.12345`, which has an integer digit count of 30. + +If you define a `NUMERIC(, )` column and the integer digit count of a value is greater than ` - `, PXF returns an error. For example, you define a `NUMERIC(20,4)` column and the value is `12345678901234567.12`, which has an integer digit count of 19, which is greater than 20-4=16. PXF can take one of three actions when it detects an overflow while writing numeric data to a Parquet file: round the value (the default), return an error, or ignore the overflow. The `pxf.parquet.write.decimal.overflow` property in the `pxf-site.xml` server configuration governs PXF's action in this circumstance; valid values for this property follow: | Value | PXF Action | |-------|-------------------------------------| -| `round` | When PXF encounters an overflow, it attempts to round the value before writing and logs a warning. PXF reports an error if rounding fails. This may potentially leave an incomplete data set in the external system. `round` is the default. | +| `round` | When PXF encounters an overflow, it attempts to round the value to meet both precision and scale requirements before writing. PXF reports an error if rounding fails. 
This may potentially leave an incomplete data set in the external system. `round` is the default. | | `error` | PXF reports an error when it encounters an overflow, and the transaction fails. | -| `ignore` | PXF writes a NULL value. (This was PXF's behavior prior to version 6.6.0.) | +| `ignore` | PXF attempts to round the value to meet both precision and scale requirements; otherwise PXF writes a NULL value. (This was PXF's behavior prior to version 6.6.0.) | -PXF always logs an warning when it detects an overflow, regardless of the `pxf.parquet.write.decimal.overflow` property setting. +PXF logs a warning when it detects an overflow and the `pxf.parquet.write.decimal.overflow` property is set to `ignore`. diff --git a/docs/content/upgrade_6.html.md.erb b/docs/content/upgrade_6.html.md.erb index 0e7f5d64e5..917398f8a7 100644 --- a/docs/content/upgrade_6.html.md.erb +++ b/docs/content/upgrade_6.html.md.erb @@ -127,6 +127,19 @@ After you install the new version of PXF, perform the following procedure: Refer to [Understanding Overflow Conditions When Writing Numeric Data](hdfs_parquet.html#overflow) for more information about how PXF uses this property to direct its actions when it encounters a numeric overflow while writing to a Parquet file. +1. **If you are upgrading from PXF version 6.6.x or earlier to PXF version 6.7.0 or later**, and you want to change the value of the new `pxf.orc.write.decimal.overflow` property from the default of `round`, add the following to the `pxf-site.xml` file for your PXF server: + + ``` pre + + pxf.orc.write.decimal.overflow + NEW_VALUE + + ``` + + where `NEW_VALUE` is `error` or `ignore`. + + Refer to [Understanding Overflow Conditions When Writing Numeric Data](hdfs_orc.html#overflow) for more information about how PXF uses this property to direct its actions when it encounters a numeric overflow while writing to an ORC file. + 1. 
**If you are upgrading from PXF version 6.6.x or earlier to PXF version 6.7.0 or later**, and you want to retain the previous PXF listen address (`0.0.0.0`), change the value of the `server.address` `pxf-application.properties` property as described in [Configuring the Listen Address](cfghostport.html#listen_address). 1. Synchronize the PXF configuration from the coordinator host to the standby coordinator host and each Greenplum Database segment host. For example: From ca1a7091c6ccf59d40910b628a8bd353459bc148 Mon Sep 17 00:00:00 2001 From: Yiming Li <99833827+yimingli-vmware@users.noreply.github.com> Date: Thu, 13 Jul 2023 10:18:04 -0700 Subject: [PATCH 31/35] Bump version to 6.7.0 (#999) Authored-by: Yiming Li --- .github/workflows/create-release-on-tag.yml | 22 +++++++++++++-------- CHANGELOG.md | 18 +++++++++++++++++ version | 2 +- 3 files changed, 33 insertions(+), 9 deletions(-) diff --git a/.github/workflows/create-release-on-tag.yml b/.github/workflows/create-release-on-tag.yml index 922c17d4ab..21fea525bd 100644 --- a/.github/workflows/create-release-on-tag.yml +++ b/.github/workflows/create-release-on-tag.yml @@ -22,17 +22,23 @@ jobs: tag_name: ${{ github.ref }} release_name: PXF Version ${{ github.ref }} body: | - ## 6.6.0 (04/06/2023) + ## 6.7.0 (07/13/2023) ### Enhancements: - - - [#949](https://github.com/greenplum-db/pxf/pull/949) Support for fixedwidth formatter with new `*:fixedwidth` PXF profiles - - [#954](https://github.com/greenplum-db/pxf/pull/954) Update table options names to not include dash character - - [#955](https://github.com/greenplum-db/pxf/pull/955) Bump jackson-databind from 2.13.4.1 to 2.13.4.2 in /automation - + + - [#956](https://github.com/greenplum-db/pxf/pull/956) Add pxfdelimited_import formatter to support multibyte delimiters for TEXT and CSV profiles + - [#960](https://github.com/greenplum-db/pxf/pull/960) Add support year with more than 4 digits in 'date' or 'timestamp' + - 
[#973](https://github.com/greenplum-db/pxf/pull/973) Enable write flow for FDW for non-text/csv formats + - [#976](https://github.com/greenplum-db/pxf/pull/976) Restrict PXF to listen to local requests only + - [#979](https://github.com/greenplum-db/pxf/pull/979) Add logging to the LineBreakAccessor for the write + - [#983](https://github.com/greenplum-db/pxf/pull/983) Bump Springboot to 2.7.12 + - [#984](https://github.com/greenplum-db/pxf/pull/984) Enable writing data in JSON format using *:json profiles + - [#989](https://github.com/greenplum-db/pxf/pull/989) Bump snappy to 1.1.10.1 + ### Bug Fixes: - - - [#940](https://github.com/greenplum-db/pxf/pull/940) Introduced options to handle decimal overflow when writing Parquet files + + - [#967](https://github.com/greenplum-db/pxf/pull/967) FDW: Fix for skipping the dropped and correctly counting Projection Index + - [#978](https://github.com/greenplum-db/pxf/pull/978) Added erroring out logic for decimal overflow for ORC draft: false prerelease: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e673b962f..6b8616c8c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 6.7.0 (07/13/2023) + +### Enhancements: + +- [#956](https://github.com/greenplum-db/pxf/pull/956) Add pxfdelimited_import formatter to support multibyte delimiters for TEXT and CSV profiles +- [#960](https://github.com/greenplum-db/pxf/pull/960) Add support year with more than 4 digits in 'date' or 'timestamp' +- [#973](https://github.com/greenplum-db/pxf/pull/973) Enable write flow for FDW for non-text/csv formats +- [#976](https://github.com/greenplum-db/pxf/pull/976) Restrict PXF to listen to local requests only +- [#979](https://github.com/greenplum-db/pxf/pull/979) Add logging to the LineBreakAccessor for the write +- [#983](https://github.com/greenplum-db/pxf/pull/983) Bump Springboot to 2.7.12 +- [#984](https://github.com/greenplum-db/pxf/pull/984) Enable writing data in JSON format using *:json profiles +- 
[#989](https://github.com/greenplum-db/pxf/pull/989) Bump snappy to 1.1.10.1 + +### Bug Fixes: + +- [#967](https://github.com/greenplum-db/pxf/pull/967) FDW: Fix for skipping the dropped and correctly counting Projection Index +- [#978](https://github.com/greenplum-db/pxf/pull/978) Added erroring out logic for decimal overflow for ORC + ## 6.6.0 (04/06/2023) ### Enhancements: diff --git a/version b/version index 1b77cc46ff..f0e13c5090 100644 --- a/version +++ b/version @@ -1 +1 @@ -6.6.1-SNAPSHOT +6.7.0 From 1bd7fe0e2730ba6a667a2e35529ce220fef295d4 Mon Sep 17 00:00:00 2001 From: Roman Zolotov Date: Tue, 25 Jul 2023 17:05:52 +0300 Subject: [PATCH 32/35] ADBDEV-4011: Fix merge issue with Spring Boot Version 2.4.3 Spring Boot Version 2.4.3 adds org.mockito package with version 3.6.8. There is a bug with this version: https://github.com/mockito/mockito/pull/2545 So we need to keep additional invocation. --- .../plugins/hive/HiveMetastoreCompatibilityTest.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/server/pxf-hive/src/test/java/org/greenplum/pxf/plugins/hive/HiveMetastoreCompatibilityTest.java b/server/pxf-hive/src/test/java/org/greenplum/pxf/plugins/hive/HiveMetastoreCompatibilityTest.java index 1146bab982..ba0c9f7656 100644 --- a/server/pxf-hive/src/test/java/org/greenplum/pxf/plugins/hive/HiveMetastoreCompatibilityTest.java +++ b/server/pxf-hive/src/test/java/org/greenplum/pxf/plugins/hive/HiveMetastoreCompatibilityTest.java @@ -293,6 +293,17 @@ public void getTableFailedToConnectToMetastoreFiveRetries3rdSuccess() throws Exc } return null; }, + // placebo run through + // the second to last invocation keeps getting skipped so place this here as a placebo + // https://github.com/mockito/mockito/pull/2545 + invocation -> { + if (invocation.getMethod().getName().equals("get_table_req")) { + throw new TApplicationException("fallback ???"); + } else if (invocation.getMethod().getName().equals("get_table")) { + throw new TTransportException("oops. 
where's the metastore? ???"); + } + return null; + }, // final run through (retry 3 = success) invocation -> { if (invocation.getMethod().getName().equals("get_table_req")) { From aaffcd23e4e74bd542b663e3153de50dcb6d0649 Mon Sep 17 00:00:00 2001 From: Roman Zolotov Date: Tue, 25 Jul 2023 17:07:06 +0300 Subject: [PATCH 33/35] ADBDEV-4011: Fix merge issue with LocalServerPort annotation Spring Boot Version 2.4.3 contains this annotation in the 'org.springframework.boot.web.server' package --- .../src/test/java/org/greenplum/pxf/service/PxfMetricsIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/pxf-service/src/test/java/org/greenplum/pxf/service/PxfMetricsIT.java b/server/pxf-service/src/test/java/org/greenplum/pxf/service/PxfMetricsIT.java index d6de1cb19b..4c049a8056 100644 --- a/server/pxf-service/src/test/java/org/greenplum/pxf/service/PxfMetricsIT.java +++ b/server/pxf-service/src/test/java/org/greenplum/pxf/service/PxfMetricsIT.java @@ -12,7 +12,7 @@ import org.springframework.boot.test.autoconfigure.actuate.metrics.AutoConfigureMetrics; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.mock.mockito.MockBean; -import org.springframework.boot.test.web.server.LocalServerPort; +import org.springframework.boot.web.server.LocalServerPort; import org.springframework.test.web.reactive.server.WebTestClient; import org.springframework.util.MultiValueMap; From c690651953d3088b71d150c06bb91be46c615fb5 Mon Sep 17 00:00:00 2001 From: Roman Zolotov Date: Tue, 25 Jul 2023 17:20:08 +0300 Subject: [PATCH 34/35] ADBDEV-4011: Add backward compatability for jdbc.date.wideRange --- .../pxf/plugins/jdbc/JdbcBasePlugin.java | 27 +++++++++++++++++-- .../pxf/plugins/jdbc/JdbcResolver.java | 5 ++-- .../pxf/plugins/jdbc/JdbcBasePluginTest.java | 9 +++++++ .../pxf/plugins/jdbc/JdbcResolverTest.java | 5 +++- 4 files changed, 41 insertions(+), 5 deletions(-) diff --git 
a/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePlugin.java b/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePlugin.java index 3e88ec579d..2169f7f4f2 100644 --- a/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePlugin.java +++ b/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePlugin.java @@ -45,6 +45,7 @@ import java.util.Map; import java.util.Properties; import java.util.stream.Collectors; +import java.util.Objects; import static org.greenplum.pxf.api.security.SecureLogin.CONFIG_KEY_SERVICE_USER_IMPERSONATION; @@ -98,7 +99,7 @@ public class JdbcBasePlugin extends BasePlugin { private static final String HIVE_DEFAULT_DRIVER_CLASS = "org.apache.hive.jdbc.HiveDriver"; private static final String MYSQL_DRIVER_PREFIX = "com.mysql."; private static final String JDBC_DATE_WIDE_RANGE = "jdbc.date.wideRange"; - + private static final String JDBC_DATE_WIDE_RANGE_LEGACY = "jdbc.date.wide-range"; private enum TransactionIsolation { READ_UNCOMMITTED(1), READ_COMMITTED(2), @@ -386,7 +387,16 @@ public void afterPropertiesSet() { // Optional parameter to determine if the year might contain more than 4 digits in `date` or 'timestamp'. // The default value is false. - isDateWideRange = configuration.getBoolean(JDBC_DATE_WIDE_RANGE, false); + // We need to check the legacy parameter name for backward compatability with the open source project + String dateWideRangeConfig = configuration.get(JDBC_DATE_WIDE_RANGE_LEGACY); + String dateWideRangeContext = context.getOption(JDBC_DATE_WIDE_RANGE_LEGACY); + if (Objects.nonNull(dateWideRangeContext) || Objects.nonNull(dateWideRangeConfig)) { + LOG.warn("'{}' is a deprecated name of the parameter. 
Use 'date_wide_range' in the external table definition or " + + "'{}' in the jdbc-site.xml configuration file", JDBC_DATE_WIDE_RANGE_LEGACY, JDBC_DATE_WIDE_RANGE); + isDateWideRange = isDateWideRange(dateWideRangeContext); + } else { + isDateWideRange = configuration.getBoolean(JDBC_DATE_WIDE_RANGE, false); + } } /** @@ -615,4 +625,17 @@ private Map getPropsWithPrefix(Configuration configuration, Stri return configMap; } + /** + * Determine if the year might contain more than 4 digits in 'date' or 'timestamp' using the legacy parameter name. + * + * @param dateWideRangeContext value of the parameter from the context + * @return true if the year might contain more than 4 digits + */ + private boolean isDateWideRange(String dateWideRangeContext) { + if (Objects.nonNull(dateWideRangeContext)) { + return Boolean.parseBoolean(dateWideRangeContext); + } else { + return configuration.getBoolean(JDBC_DATE_WIDE_RANGE_LEGACY, false); + } + } } diff --git a/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcResolver.java b/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcResolver.java index fe477e5735..7816386135 100644 --- a/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcResolver.java +++ b/server/pxf-jdbc/src/main/java/org/greenplum/pxf/plugins/jdbc/JdbcResolver.java @@ -19,6 +19,7 @@ * under the License. 
*/ +import io.arenadata.security.encryption.client.service.DecryptClient; import org.greenplum.pxf.api.OneField; import org.greenplum.pxf.api.OneRow; import org.greenplum.pxf.api.io.DataType; @@ -152,8 +153,8 @@ public JdbcResolver() { * @param connectionManager connection manager * @param secureLogin the instance of the secure login */ - JdbcResolver(ConnectionManager connectionManager, SecureLogin secureLogin) { - super(connectionManager, secureLogin); + JdbcResolver(ConnectionManager connectionManager, SecureLogin secureLogin, DecryptClient decryptClient) { + super(connectionManager, secureLogin, decryptClient); } /** diff --git a/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePluginTest.java b/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePluginTest.java index 924fd5bf5a..4dd443f97e 100644 --- a/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePluginTest.java +++ b/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcBasePluginTest.java @@ -497,6 +497,15 @@ public void testDateWideRangeFromConfiguration() throws SQLException { assertTrue(plugin.isDateWideRange); } + @Test + public void testDateWideRangeLegacyFromConfiguration() throws SQLException { + configuration.set("jdbc.driver", "org.greenplum.pxf.plugins.jdbc.FakeJdbcDriver"); + configuration.set("jdbc.url", "test-url"); + configuration.set("jdbc.date.wide-range", "true"); + JdbcBasePlugin plugin = getPlugin(mockConnectionManager, mockSecureLogin, context); + assertTrue(plugin.isDateWideRange); + } + private JdbcBasePlugin getPlugin(ConnectionManager mockConnectionManager, SecureLogin mockSecureLogin, RequestContext context) { JdbcBasePlugin plugin = new JdbcBasePlugin(mockConnectionManager, mockSecureLogin, mockDecryptClient); plugin.setRequestContext(context); diff --git a/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcResolverTest.java 
b/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcResolverTest.java index d253001689..c134fad49a 100644 --- a/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcResolverTest.java +++ b/server/pxf-jdbc/src/test/java/org/greenplum/pxf/plugins/jdbc/JdbcResolverTest.java @@ -1,5 +1,6 @@ package org.greenplum.pxf.plugins.jdbc; +import io.arenadata.security.encryption.client.service.DecryptClient; import org.greenplum.pxf.api.OneField; import org.greenplum.pxf.api.OneRow; import org.greenplum.pxf.api.io.DataType; @@ -42,6 +43,8 @@ class JdbcResolverTest { private ConnectionManager mockConnectionManager; @Mock private SecureLogin mockSecureLogin; + @Mock + DecryptClient decryptClient; RequestContext context = new RequestContext(); List columnDescriptors = new ArrayList<>(); List oneFieldList = new ArrayList<>(); @@ -50,7 +53,7 @@ class JdbcResolverTest { @BeforeEach void setup() { - resolver = new JdbcResolver(mockConnectionManager, mockSecureLogin); + resolver = new JdbcResolver(mockConnectionManager, mockSecureLogin, decryptClient); } @Test From bcd36376e8a5cd268c04f264a77fa2aa1a13355b Mon Sep 17 00:00:00 2001 From: Roman Zolotov Date: Wed, 26 Jul 2023 18:11:40 +0300 Subject: [PATCH 35/35] ADBDEV-4011: Fix DemoTextResolver In the previous releases, if an external table has 'TEXT' output format a text row data was passed as a single field. In this release, the logic was changed. The new annotation was introduced. This annotation is used for marking Plugins as capable of handling InputStream with raw data. DemoTextResolver can handle InputStream, but it didn't have this annotation. 
--- .../java/org/greenplum/pxf/api/examples/DemoTextResolver.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/pxf-api/src/main/java/org/greenplum/pxf/api/examples/DemoTextResolver.java b/server/pxf-api/src/main/java/org/greenplum/pxf/api/examples/DemoTextResolver.java index 55eae11a5d..2bd2cbc0cd 100644 --- a/server/pxf-api/src/main/java/org/greenplum/pxf/api/examples/DemoTextResolver.java +++ b/server/pxf-api/src/main/java/org/greenplum/pxf/api/examples/DemoTextResolver.java @@ -22,6 +22,7 @@ import org.greenplum.pxf.api.OneField; import org.greenplum.pxf.api.OneRow; import org.greenplum.pxf.api.io.DataType; +import org.greenplum.pxf.api.model.InputStreamHandler; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -33,6 +34,7 @@ *

* Demo implementation of resolver that returns text format */ +@InputStreamHandler public class DemoTextResolver extends DemoResolver { /**

Table 1. JSON Mapping