diff --git a/pbm-functional/pytest/Dockerfile b/pbm-functional/pytest/Dockerfile
index e654d748..b82249e1 100644
--- a/pbm-functional/pytest/Dockerfile
+++ b/pbm-functional/pytest/Dockerfile
@@ -39,6 +39,7 @@ COPY --from=easyrsa /etc/x509/ /etc/x509/
 COPY --from=easyrsa /etc/nginx-minio/ca.crt /etc/nginx-minio/ca.crt
 COPY conf/supervisord.d/ /etc/supervisord.d/
 COPY conf/pbm/minio.yaml /etc/pbm.conf
+COPY conf/pbm/minio-worm.yaml /etc/minio-worm.conf
 COPY conf/pbm/fs.yaml /etc/pbm-fs.conf
 COPY conf/pbm/pbm-1525.yaml /etc/pbm-1525.conf
 COPY conf/pbm/pbm-1043.yaml /etc/pbm-1043.conf
diff --git a/pbm-functional/pytest/conf/pbm/minio-worm.yaml b/pbm-functional/pytest/conf/pbm/minio-worm.yaml
new file mode 100644
index 00000000..53f10bf1
--- /dev/null
+++ b/pbm-functional/pytest/conf/pbm/minio-worm.yaml
@@ -0,0 +1,8 @@
+storage:
+  type: s3
+  s3:
+    endpointUrl: http://minio:9000
+    bucket: worm
+    credentials:
+      access-key-id: "minio1234"
+      secret-access-key: "minio1234"
diff --git a/pbm-functional/pytest/docker-compose.yaml b/pbm-functional/pytest/docker-compose.yaml
index 86e32add..5ff0deab 100644
--- a/pbm-functional/pytest/docker-compose.yaml
+++ b/pbm-functional/pytest/docker-compose.yaml
@@ -64,7 +64,7 @@ services:
     depends_on:
       - minio
     entrypoint: >
-      /bin/sh -c " sleep 5; /usr/bin/mc alias set myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; "
+      /bin/sh -c " sleep 5; /usr/bin/mc alias set myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; /usr/bin/mc mb --with-lock myminio/worm; exit 0; "
   pykmip:
     image: pykmip/local
diff --git a/pbm-functional/pytest/test_worm_profile.py b/pbm-functional/pytest/test_worm_profile.py
new file mode 100644
index 00000000..9479cc9b
--- /dev/null
+++ b/pbm-functional/pytest/test_worm_profile.py
@@ -0,0 +1,59 @@
+import pytest
+import pymongo
+import time
+import os
+
+from cluster import Cluster
+
+@pytest.fixture(scope="package")
+def config():
+    return { "mongos": "mongos",
+        "configserver":
+            {"_id": "rscfg", "members": [{"host":"rscfg01"}]},
+        "shards":[
+            {"_id": "rs1", "members": [{"host":"rs101"}]},
+            {"_id": "rs2", "members": [{"host":"rs201"}]}
+        ]}
+
+@pytest.fixture(scope="package")
+def cluster(config):
+    return Cluster(config)
+
+@pytest.fixture(scope="function")
+def start_cluster(cluster,request):
+    try:
+        cluster.destroy()
+        os.chmod("/backups",0o777)
+        os.system("rm -rf /backups/*")
+        cluster.create()
+        cluster.setup_pbm()
+        profile=cluster.exec_pbm_cli("profile add worm /etc/minio-worm.conf --wait")
+        assert profile.rc==0, profile.stderr
+        assert "OK" in profile.stdout, profile.stdout
+        client=pymongo.MongoClient(cluster.connection)
+        client.admin.command("enableSharding", "test")
+        client.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
+        yield True
+
+    finally:
+        if request.config.getoption("--verbose"):
+            cluster.get_logs()
+        cluster.destroy(cleanup_backups=True)
+
+@pytest.mark.timeout(300,func_only=True)
+@pytest.mark.parametrize('backup_type',['logical','physical'])
+def test_worm_profile(start_cluster,cluster,backup_type):
+    client=pymongo.MongoClient(cluster.connection)
+    for i in range(300):
+        client['test']['test'].insert_one({"doc":i})
+
+    backup=cluster.make_backup(backup_type + " --profile worm")
+    pymongo.MongoClient(cluster.connection).drop_database('test')
+    Cluster.log("Attempt restore " + backup_type + " backup from the worm storage")
+    if backup_type == 'logical':
+        cluster.make_restore(backup, check_pbm_status=True)
+    else:
+        cluster.make_restore(backup, restart_cluster=True, check_pbm_status=True)
+    time.sleep(5)
+    assert pymongo.MongoClient(cluster.connection)["test"]["test"].count_documents({}) == 300
+    assert pymongo.MongoClient(cluster.connection)["test"].command("collstats", "test").get("sharded", False)